-rw-r--r--.get_maintainer.ignore1
-rw-r--r--.mailmap6
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio6
-rw-r--r--Documentation/DocBook/drm.tmpl2
-rw-r--r--Documentation/arm/SPEAr/overview.txt2
-rw-r--r--Documentation/device-mapper/cache.txt6
-rw-r--r--Documentation/device-mapper/thin-provisioning.txt9
-rw-r--r--Documentation/devicetree/bindings/arm/cpus.txt1
-rw-r--r--Documentation/devicetree/bindings/dma/apm-xgene-dma.txt2
-rw-r--r--Documentation/devicetree/bindings/drm/imx/fsl-imx-drm.txt26
-rw-r--r--Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt3
-rw-r--r--Documentation/devicetree/bindings/mfd/rk808.txt8
-rw-r--r--Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt2
-rw-r--r--Documentation/devicetree/bindings/phy/ti-phy.txt16
-rw-r--r--Documentation/devicetree/bindings/regulator/da9210.txt4
-rw-r--r--Documentation/devicetree/bindings/regulator/da9211.txt32
-rw-r--r--Documentation/devicetree/bindings/regulator/max8973-regulator.txt6
-rw-r--r--Documentation/devicetree/bindings/regulator/mt6311-regulator.txt35
-rw-r--r--Documentation/devicetree/bindings/regulator/pwm-regulator.txt65
-rw-r--r--Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.txt60
-rw-r--r--Documentation/devicetree/bindings/regulator/regulator.txt1
-rw-r--r--Documentation/devicetree/bindings/sound/mt8173-max98090.txt2
-rw-r--r--Documentation/devicetree/bindings/sound/mt8173-rt5650-rt5676.txt2
-rw-r--r--Documentation/devicetree/bindings/spi/spi-ath79.txt6
-rw-r--r--Documentation/hwmon/nct79044
-rw-r--r--Documentation/input/alps.txt6
-rw-r--r--Documentation/kbuild/makefiles.txt8
-rwxr-xr-xDocumentation/target/tcm_mod_builder.py21
-rw-r--r--MAINTAINERS141
-rw-r--r--Makefile20
-rw-r--r--arch/Kconfig4
-rw-r--r--arch/alpha/include/asm/Kbuild1
-rw-r--r--arch/alpha/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/arc/Kconfig16
-rw-r--r--arch/arc/Makefile13
-rw-r--r--arch/arc/boot/dts/axc003.dtsi2
-rw-r--r--arch/arc/boot/dts/axc003_idu.dtsi2
-rw-r--r--arch/arc/include/asm/Kbuild1
-rw-r--r--arch/arc/include/asm/arcregs.h7
-rw-r--r--arch/arc/include/asm/atomic.h78
-rw-r--r--arch/arc/include/asm/bitops.h35
-rw-r--r--arch/arc/include/asm/futex.h48
-rw-r--r--arch/arc/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/arc/include/asm/ptrace.h52
-rw-r--r--arch/arc/include/asm/spinlock.h538
-rw-r--r--arch/arc/include/asm/spinlock_types.h2
-rw-r--r--arch/arc/include/uapi/asm/ptrace.h20
-rw-r--r--arch/arc/kernel/intc-arcv2.c1
-rw-r--r--arch/arc/kernel/intc-compact.c1
-rw-r--r--arch/arc/kernel/mcip.c23
-rw-r--r--arch/arc/kernel/setup.c27
-rw-r--r--arch/arc/kernel/time.c40
-rw-r--r--arch/arc/kernel/troubleshoot.c1
-rw-r--r--arch/arc/lib/memcpy-archs.S2
-rw-r--r--arch/arc/lib/memset-archs.S43
-rw-r--r--arch/arc/mm/cache.c12
-rw-r--r--arch/arc/mm/dma.c4
-rw-r--r--arch/arc/plat-axs10x/axs10x.c15
-rw-r--r--arch/arm/Makefile3
-rw-r--r--arch/arm/boot/dts/am335x-pepper.dts16
-rw-r--r--arch/arm/boot/dts/cros-ec-keyboard.dtsi4
-rw-r--r--arch/arm/boot/dts/dra7-evm.dts5
-rw-r--r--arch/arm/boot/dts/dra7.dtsi3
-rw-r--r--arch/arm/boot/dts/dra72-evm.dts5
-rw-r--r--arch/arm/boot/dts/exynos3250.dtsi2
-rw-r--r--arch/arm/boot/dts/exynos4210-origen.dts4
-rw-r--r--arch/arm/boot/dts/exynos4210-trats.dts4
-rw-r--r--arch/arm/boot/dts/exynos4210-universal_c210.dts4
-rw-r--r--arch/arm/boot/dts/exynos4210.dtsi12
-rw-r--r--arch/arm/boot/dts/imx23.dtsi1
-rw-r--r--arch/arm/boot/dts/imx25-pdk.dts5
-rw-r--r--arch/arm/boot/dts/imx27.dtsi12
-rw-r--r--arch/arm/boot/dts/imx35.dtsi8
-rw-r--r--arch/arm/boot/dts/imx51-apf51dev.dts2
-rw-r--r--arch/arm/boot/dts/imx53-ard.dts4
-rw-r--r--arch/arm/boot/dts/imx53-m53evk.dts4
-rw-r--r--arch/arm/boot/dts/imx53-qsb-common.dtsi9
-rw-r--r--arch/arm/boot/dts/imx53-smd.dts4
-rw-r--r--arch/arm/boot/dts/imx53-tqma53.dtsi4
-rw-r--r--arch/arm/boot/dts/imx53-tx53.dtsi4
-rw-r--r--arch/arm/boot/dts/imx53-voipac-bsb.dts4
-rw-r--r--arch/arm/boot/dts/imx6dl-riotboard.dts8
-rw-r--r--arch/arm/boot/dts/imx6q-arm2.dts5
-rw-r--r--arch/arm/boot/dts/imx6q-gk802.dts3
-rw-r--r--arch/arm/boot/dts/imx6q-tbs2910.dts4
-rw-r--r--arch/arm/boot/dts/imx6qdl-aristainetos.dtsi4
-rw-r--r--arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi4
-rw-r--r--arch/arm/boot/dts/imx6qdl-cubox-i.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi4
-rw-r--r--arch/arm/boot/dts/imx6qdl-gw52xx.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6qdl-gw53xx.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6qdl-gw54xx.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6qdl-hummingboard.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi4
-rw-r--r--arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi8
-rw-r--r--arch/arm/boot/dts/imx6qdl-rex.dtsi4
-rw-r--r--arch/arm/boot/dts/imx6qdl-sabreauto.dtsi4
-rw-r--r--arch/arm/boot/dts/imx6qdl-sabrelite.dtsi6
-rw-r--r--arch/arm/boot/dts/imx6qdl-sabresd.dtsi8
-rw-r--r--arch/arm/boot/dts/imx6qdl-tx6.dtsi4
-rw-r--r--arch/arm/boot/dts/imx6qdl-wandboard.dtsi6
-rw-r--r--arch/arm/boot/dts/imx6qdl.dtsi8
-rw-r--r--arch/arm/boot/dts/imx6sl-evk.dts10
-rw-r--r--arch/arm/boot/dts/imx6sx-sabreauto.dts4
-rw-r--r--arch/arm/boot/dts/imx6sx-sdb.dtsi4
-rw-r--r--arch/arm/boot/dts/imx7d-sdb.dts4
-rw-r--r--arch/arm/boot/dts/k2e-clocks.dtsi5
-rw-r--r--arch/arm/boot/dts/k2e.dtsi18
-rw-r--r--arch/arm/boot/dts/k2hk-clocks.dtsi5
-rw-r--r--arch/arm/boot/dts/k2hk.dtsi11
-rw-r--r--arch/arm/boot/dts/k2l-clocks.dtsi5
-rw-r--r--arch/arm/boot/dts/k2l.dtsi16
-rw-r--r--arch/arm/boot/dts/keystone.dtsi14
-rw-r--r--arch/arm/boot/dts/omap2430.dtsi3
-rw-r--r--arch/arm/boot/dts/omap3-overo-common-lcd35.dtsi2
-rw-r--r--arch/arm/boot/dts/omap3-overo-common-lcd43.dtsi2
-rw-r--r--arch/arm/boot/dts/omap4.dtsi5
-rw-r--r--arch/arm/boot/dts/omap5.dtsi5
-rw-r--r--arch/arm/boot/dts/socfpga_cyclone5_sockit.dts26
-rw-r--r--arch/arm/boot/dts/spear1310-evb.dts2
-rw-r--r--arch/arm/boot/dts/spear1310.dtsi2
-rw-r--r--arch/arm/boot/dts/spear1340-evb.dts2
-rw-r--r--arch/arm/boot/dts/spear1340.dtsi2
-rw-r--r--arch/arm/boot/dts/spear13xx.dtsi2
-rw-r--r--arch/arm/boot/dts/spear300-evb.dts2
-rw-r--r--arch/arm/boot/dts/spear300.dtsi2
-rw-r--r--arch/arm/boot/dts/spear310-evb.dts2
-rw-r--r--arch/arm/boot/dts/spear310.dtsi2
-rw-r--r--arch/arm/boot/dts/spear320-evb.dts2
-rw-r--r--arch/arm/boot/dts/spear320.dtsi2
-rw-r--r--arch/arm/boot/dts/spear3xx.dtsi2
-rw-r--r--arch/arm/boot/dts/ste-ccu8540.dts7
-rw-r--r--arch/arm/boot/dts/ste-ccu9540.dts7
-rw-r--r--arch/arm/boot/dts/ste-dbx5x0.dtsi59
-rw-r--r--arch/arm/boot/dts/ste-href.dtsi2
-rw-r--r--arch/arm/boot/dts/ste-hrefprev60-stuib.dts7
-rw-r--r--arch/arm/boot/dts/ste-hrefprev60-tvk.dts7
-rw-r--r--arch/arm/boot/dts/ste-hrefprev60.dtsi5
-rw-r--r--arch/arm/boot/dts/ste-hrefv60plus-stuib.dts7
-rw-r--r--arch/arm/boot/dts/ste-hrefv60plus-tvk.dts7
-rw-r--r--arch/arm/boot/dts/ste-hrefv60plus.dtsi25
-rw-r--r--arch/arm/boot/dts/ste-nomadik-nhk15.dts1
-rw-r--r--arch/arm/boot/dts/ste-nomadik-s8815.dts4
-rw-r--r--arch/arm/boot/dts/ste-nomadik-stn8815.dtsi1
-rw-r--r--arch/arm/boot/dts/ste-snowball.dts25
-rw-r--r--arch/arm/include/asm/Kbuild1
-rw-r--r--arch/arm/include/asm/memory.h2
-rw-r--r--arch/arm/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/arm/kernel/entry-common.S1
-rw-r--r--arch/arm/kernel/head.S3
-rw-r--r--arch/arm/kernel/perf_event.c3
-rw-r--r--arch/arm/kernel/reboot.c2
-rw-r--r--arch/arm/kernel/vdso.c7
-rw-r--r--arch/arm/lib/uaccess_with_memcpy.c2
-rw-r--r--arch/arm/mach-exynos/pm_domains.c3
-rw-r--r--arch/arm/mach-imx/gpc.c27
-rw-r--r--arch/arm/mach-omap2/Kconfig1
-rw-r--r--arch/arm/mach-omap2/omap-wakeupgen.c1
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c24
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_7xx_data.c5
-rw-r--r--arch/arm/mach-pxa/capc7117.c3
-rw-r--r--arch/arm/mach-pxa/cm-x2xx.c3
-rw-r--r--arch/arm/mach-pxa/cm-x300.c2
-rw-r--r--arch/arm/mach-pxa/colibri-pxa270.c3
-rw-r--r--arch/arm/mach-pxa/em-x270.c2
-rw-r--r--arch/arm/mach-pxa/icontrol.c3
-rw-r--r--arch/arm/mach-pxa/trizeps4.c3
-rw-r--r--arch/arm/mach-pxa/vpac270.c3
-rw-r--r--arch/arm/mach-pxa/zeus.c2
-rw-r--r--arch/arm/mach-spear/generic.h2
-rw-r--r--arch/arm/mach-spear/include/mach/irqs.h2
-rw-r--r--arch/arm/mach-spear/include/mach/misc_regs.h2
-rw-r--r--arch/arm/mach-spear/include/mach/spear.h2
-rw-r--r--arch/arm/mach-spear/include/mach/uncompress.h2
-rw-r--r--arch/arm/mach-spear/pl080.c2
-rw-r--r--arch/arm/mach-spear/pl080.h2
-rw-r--r--arch/arm/mach-spear/restart.c2
-rw-r--r--arch/arm/mach-spear/spear1310.c2
-rw-r--r--arch/arm/mach-spear/spear1340.c2
-rw-r--r--arch/arm/mach-spear/spear13xx.c2
-rw-r--r--arch/arm/mach-spear/spear300.c2
-rw-r--r--arch/arm/mach-spear/spear310.c2
-rw-r--r--arch/arm/mach-spear/spear320.c2
-rw-r--r--arch/arm/mach-spear/spear3xx.c2
-rw-r--r--arch/arm/mm/dma-mapping.c2
-rw-r--r--arch/arm/mm/proc-v7.S14
-rw-r--r--arch/arm/net/bpf_jit_32.c57
-rw-r--r--arch/arm/vdso/Makefile2
-rw-r--r--arch/arm64/boot/dts/apm/apm-storm.dtsi2
-rw-r--r--arch/arm64/include/asm/Kbuild1
-rw-r--r--arch/arm64/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/arm64/kernel/efi.c4
-rw-r--r--arch/arm64/kernel/entry.S5
-rw-r--r--arch/arm64/kernel/irq.c4
-rw-r--r--arch/arm64/kernel/signal32.c5
-rw-r--r--arch/arm64/kernel/vdso.c7
-rw-r--r--arch/avr32/include/asm/Kbuild1
-rw-r--r--arch/avr32/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/avr32/kernel/time.c65
-rw-r--r--arch/avr32/mach-at32ap/clock.c20
-rw-r--r--arch/blackfin/include/asm/Kbuild1
-rw-r--r--arch/blackfin/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/c6x/include/asm/Kbuild1
-rw-r--r--arch/c6x/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/cris/include/asm/Kbuild1
-rw-r--r--arch/cris/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/frv/include/asm/Kbuild1
-rw-r--r--arch/frv/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/h8300/include/asm/Kbuild1
-rw-r--r--arch/hexagon/include/asm/Kbuild1
-rw-r--r--arch/hexagon/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/ia64/include/asm/Kbuild1
-rw-r--r--arch/ia64/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/m32r/include/asm/Kbuild1
-rw-r--r--arch/m32r/include/asm/io.h5
-rw-r--r--arch/m32r/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/m68k/Kconfig.cpu49
-rw-r--r--arch/m68k/configs/m5208evb_defconfig22
-rw-r--r--arch/m68k/configs/m5249evb_defconfig17
-rw-r--r--arch/m68k/configs/m5272c3_defconfig14
-rw-r--r--arch/m68k/configs/m5275evb_defconfig19
-rw-r--r--arch/m68k/configs/m5307c3_defconfig21
-rw-r--r--arch/m68k/configs/m5407c3_defconfig17
-rw-r--r--arch/m68k/configs/m5475evb_defconfig9
-rw-r--r--arch/m68k/include/asm/Kbuild1
-rw-r--r--arch/m68k/include/asm/coldfire.h2
-rw-r--r--arch/m68k/include/asm/io_mm.h3
-rw-r--r--arch/m68k/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/metag/include/asm/Kbuild1
-rw-r--r--arch/metag/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/microblaze/include/asm/Kbuild1
-rw-r--r--arch/microblaze/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/mips/Kconfig7
-rw-r--r--arch/mips/Makefile7
-rw-r--r--arch/mips/ath79/setup.c1
-rw-r--r--arch/mips/cavium-octeon/smp.c2
-rw-r--r--arch/mips/include/asm/Kbuild1
-rw-r--r--arch/mips/include/asm/fpu.h2
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/dma-coherence.h10
-rw-r--r--arch/mips/include/asm/mach-sibyte/war.h3
-rw-r--r--arch/mips/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/mips/include/asm/pgtable.h31
-rw-r--r--arch/mips/include/asm/smp.h2
-rw-r--r--arch/mips/include/asm/stackframe.h25
-rw-r--r--arch/mips/include/uapi/asm/sigcontext.h4
-rw-r--r--arch/mips/kernel/asm-offsets.c2
-rw-r--r--arch/mips/kernel/genex.S2
-rw-r--r--arch/mips/kernel/mips-mt-fpaff.c5
-rw-r--r--arch/mips/kernel/prom.c2
-rw-r--r--arch/mips/kernel/relocate_kernel.S8
-rw-r--r--arch/mips/kernel/scall64-64.S2
-rw-r--r--arch/mips/kernel/scall64-n32.S2
-rw-r--r--arch/mips/kernel/signal32.c2
-rw-r--r--arch/mips/kernel/smp-bmips.c4
-rw-r--r--arch/mips/kernel/smp.c10
-rw-r--r--arch/mips/kernel/traps.c13
-rw-r--r--arch/mips/kernel/unaligned.c2
-rw-r--r--arch/mips/lantiq/irq.c3
-rw-r--r--arch/mips/loongson64/loongson-3/smp.c7
-rw-r--r--arch/mips/mm/cache.c8
-rw-r--r--arch/mips/mm/fault.c3
-rw-r--r--arch/mips/mti-malta/malta-int.c2
-rw-r--r--arch/mips/mti-malta/malta-time.c16
-rw-r--r--arch/mips/mti-sead3/sead3-time.c1
-rw-r--r--arch/mips/netlogic/common/smp.c2
-rw-r--r--arch/mips/paravirt/paravirt-smp.c2
-rw-r--r--arch/mips/pistachio/time.c1
-rw-r--r--arch/mips/pmcs-msp71xx/msp_smp.c2
-rw-r--r--arch/mips/ralink/irq.c1
-rw-r--r--arch/mips/sgi-ip27/ip27-irq.c8
-rw-r--r--arch/mips/sibyte/Kconfig5
-rw-r--r--arch/mips/sibyte/bcm1480/smp.c9
-rw-r--r--arch/mips/sibyte/common/bus_watcher.c5
-rw-r--r--arch/mips/sibyte/sb1250/setup.c2
-rw-r--r--arch/mips/sibyte/sb1250/smp.c7
-rw-r--r--arch/mn10300/include/asm/Kbuild1
-rw-r--r--arch/mn10300/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/nios2/include/asm/Kbuild1
-rw-r--r--arch/nios2/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/openrisc/Kconfig4
-rw-r--r--arch/openrisc/include/asm/Kbuild1
-rw-r--r--arch/openrisc/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/parisc/include/asm/Kbuild1
-rw-r--r--arch/parisc/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/parisc/include/asm/pgalloc.h3
-rw-r--r--arch/powerpc/kernel/signal_32.c2
-rw-r--r--arch/powerpc/platforms/powernv/eeh-powernv.c2
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c11
-rw-r--r--arch/s390/include/asm/Kbuild1
-rw-r--r--arch/s390/include/asm/ctl_reg.h5
-rw-r--r--arch/s390/include/asm/hugetlb.h1
-rw-r--r--arch/s390/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/s390/include/asm/page.h8
-rw-r--r--arch/s390/include/asm/perf_event.h8
-rw-r--r--arch/s390/kernel/asm-offsets.c15
-rw-r--r--arch/s390/kernel/cache.c2
-rw-r--r--arch/s390/kernel/entry.S13
-rw-r--r--arch/s390/kernel/nmi.c51
-rw-r--r--arch/s390/kernel/process.c2
-rw-r--r--arch/s390/kernel/sclp.S4
-rw-r--r--arch/s390/kernel/setup.c2
-rw-r--r--arch/s390/kernel/traps.c4
-rw-r--r--arch/s390/kvm/kvm-s390.c4
-rw-r--r--arch/s390/mm/pgtable.c2
-rw-r--r--arch/s390/net/bpf_jit_comp.c14
-rw-r--r--arch/s390/oprofile/init.c1
-rw-r--r--arch/score/include/asm/Kbuild1
-rw-r--r--arch/score/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/sh/include/asm/Kbuild1
-rw-r--r--arch/sh/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/sparc/include/asm/Kbuild1
-rw-r--r--arch/sparc/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/sparc/include/asm/visasm.h16
-rw-r--r--arch/sparc/lib/NG4memcpy.S5
-rw-r--r--arch/sparc/lib/VISsave.S67
-rw-r--r--arch/sparc/lib/ksyms.c4
-rw-r--r--arch/tile/include/asm/Kbuild1
-rw-r--r--arch/tile/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/tile/kernel/compat_signal.c2
-rw-r--r--arch/tile/kernel/setup.c2
-rw-r--r--arch/um/include/asm/Kbuild1
-rw-r--r--arch/um/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/unicore32/include/asm/Kbuild1
-rw-r--r--arch/unicore32/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/x86/Kconfig1
-rw-r--r--arch/x86/Kconfig.debug12
-rw-r--r--arch/x86/boot/compressed/eboot.c4
-rw-r--r--arch/x86/entry/entry_64.S299
-rw-r--r--arch/x86/entry/entry_64_compat.S17
-rw-r--r--arch/x86/include/asm/Kbuild1
-rw-r--r--arch/x86/include/asm/desc.h15
-rw-r--r--arch/x86/include/asm/fpu/types.h72
-rw-r--r--arch/x86/include/asm/intel_pmc_ipc.h27
-rw-r--r--arch/x86/include/asm/kvm_host.h2
-rw-r--r--arch/x86/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/x86/include/asm/mmu.h3
-rw-r--r--arch/x86/include/asm/mmu_context.h56
-rw-r--r--arch/x86/include/asm/processor.h10
-rw-r--r--arch/x86/include/asm/sigcontext.h6
-rw-r--r--arch/x86/include/asm/switch_to.h12
-rw-r--r--arch/x86/include/uapi/asm/hyperv.h2
-rw-r--r--arch/x86/include/uapi/asm/kvm.h4
-rw-r--r--arch/x86/include/uapi/asm/sigcontext.h21
-rw-r--r--arch/x86/kernel/apic/io_apic.c2
-rw-r--r--arch/x86/kernel/apic/vector.c2
-rw-r--r--arch/x86/kernel/cpu/common.c4
-rw-r--r--arch/x86/kernel/cpu/perf_event.c12
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel.c23
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_cqm.c16
-rw-r--r--arch/x86/kernel/fpu/core.c2
-rw-r--r--arch/x86/kernel/fpu/init.c53
-rw-r--r--arch/x86/kernel/ldt.c262
-rw-r--r--arch/x86/kernel/nmi.c123
-rw-r--r--arch/x86/kernel/process.c4
-rw-r--r--arch/x86/kernel/process_64.c4
-rw-r--r--arch/x86/kernel/signal.c26
-rw-r--r--arch/x86/kernel/smpboot.c11
-rw-r--r--arch/x86/kernel/step.c8
-rw-r--r--arch/x86/kvm/cpuid.c2
-rw-r--r--arch/x86/kvm/iommu.c2
-rw-r--r--arch/x86/kvm/lapic.c2
-rw-r--r--arch/x86/kvm/mmu.c10
-rw-r--r--arch/x86/kvm/mtrr.c40
-rw-r--r--arch/x86/kvm/svm.c110
-rw-r--r--arch/x86/kvm/vmx.c16
-rw-r--r--arch/x86/kvm/x86.c33
-rw-r--r--arch/x86/kvm/x86.h5
-rw-r--r--arch/x86/math-emu/fpu_entry.c3
-rw-r--r--arch/x86/math-emu/fpu_system.h21
-rw-r--r--arch/x86/math-emu/get_address.c3
-rw-r--r--arch/x86/mm/ioremap.c23
-rw-r--r--arch/x86/mm/mmap.c7
-rw-r--r--arch/x86/mm/mpx.c24
-rw-r--r--arch/x86/mm/tlb.c2
-rw-r--r--arch/x86/net/bpf_jit_comp.c8
-rw-r--r--arch/x86/platform/efi/efi.c5
-rw-r--r--arch/x86/power/cpu.c3
-rw-r--r--arch/x86/xen/Kconfig4
-rw-r--r--arch/x86/xen/Makefile4
-rw-r--r--arch/x86/xen/enlighten.c40
-rw-r--r--arch/x86/xen/xen-ops.h6
-rw-r--r--arch/xtensa/include/asm/Kbuild1
-rw-r--r--arch/xtensa/include/asm/mm-arch-hooks.h15
-rw-r--r--block/bio-integrity.c4
-rw-r--r--block/bio.c17
-rw-r--r--block/blk-cgroup.c146
-rw-r--r--block/blk-core.c2
-rw-r--r--block/blk-mq.c2
-rw-r--r--block/blk-settings.c4
-rw-r--r--crypto/authencesn.c44
-rw-r--r--drivers/acpi/device_pm.c2
-rw-r--r--drivers/acpi/resource.c24
-rw-r--r--drivers/acpi/video_detect.c16
-rw-r--r--drivers/ata/ahci_brcmstb.c6
-rw-r--r--drivers/ata/libata-core.c45
-rw-r--r--drivers/ata/libata-eh.c105
-rw-r--r--drivers/ata/libata-pmp.c7
-rw-r--r--drivers/ata/libata-scsi.c24
-rw-r--r--drivers/ata/libata-transport.c2
-rw-r--r--drivers/ata/libata.h6
-rw-r--r--drivers/ata/pata_arasan_cf.c4
-rw-r--r--drivers/ata/sata_sx4.c16
-rw-r--r--drivers/base/regmap/regcache-rbtree.c19
-rw-r--r--drivers/block/null_blk.c18
-rw-r--r--drivers/block/nvme-core.c13
-rw-r--r--drivers/block/rbd.c22
-rw-r--r--drivers/block/xen-blkback/blkback.c4
-rw-r--r--drivers/block/xen-blkfront.c128
-rw-r--r--drivers/block/zram/zram_drv.c6
-rw-r--r--drivers/bluetooth/btbcm.c11
-rw-r--r--drivers/char/hw_random/core.c2
-rw-r--r--drivers/char/tpm/tpm-chip.c3
-rw-r--r--drivers/char/tpm/tpm_crb.c8
-rw-r--r--drivers/clk/pxa/clk-pxa3xx.c2
-rw-r--r--drivers/clk/spear/clk-aux-synth.c2
-rw-r--r--drivers/clk/spear/clk-frac-synth.c2
-rw-r--r--drivers/clk/spear/clk-gpt-synth.c2
-rw-r--r--drivers/clk/spear/clk-vco-pll.c2
-rw-r--r--drivers/clk/spear/clk.c2
-rw-r--r--drivers/clk/spear/clk.h2
-rw-r--r--drivers/clk/spear/spear1310_clock.c2
-rw-r--r--drivers/clk/spear/spear1340_clock.c2
-rw-r--r--drivers/clk/spear/spear3xx_clock.c2
-rw-r--r--drivers/clk/spear/spear6xx_clock.c2
-rw-r--r--drivers/clocksource/sh_cmt.c6
-rw-r--r--drivers/clocksource/timer-imx-gpt.c1
-rw-r--r--drivers/cpufreq/cpufreq.c118
-rw-r--r--drivers/cpufreq/exynos-cpufreq.c6
-rw-r--r--drivers/cpufreq/freq_table.c9
-rw-r--r--drivers/cpufreq/intel_pstate.c1
-rw-r--r--drivers/cpufreq/loongson2_cpufreq.c2
-rw-r--r--drivers/cpuidle/cpuidle.c9
-rw-r--r--drivers/crypto/caam/caamhash.c7
-rw-r--r--drivers/crypto/ixp4xx_crypto.c1
-rw-r--r--drivers/crypto/nx/nx-aes-ccm.c6
-rw-r--r--drivers/crypto/nx/nx-aes-ctr.c7
-rw-r--r--drivers/crypto/nx/nx-aes-gcm.c17
-rw-r--r--drivers/crypto/nx/nx-aes-xcbc.c70
-rw-r--r--drivers/crypto/nx/nx-sha256.c70
-rw-r--r--drivers/crypto/nx/nx-sha512.c72
-rw-r--r--drivers/crypto/nx/nx.c3
-rw-r--r--drivers/crypto/nx/nx.h14
-rw-r--r--drivers/crypto/omap-des.c3
-rw-r--r--drivers/crypto/qat/qat_common/qat_algs.c24
-rw-r--r--drivers/dma/at_hdmac.c132
-rw-r--r--drivers/dma/at_hdmac_regs.h3
-rw-r--r--drivers/dma/at_xdmac.c26
-rw-r--r--drivers/dma/dmaengine.c4
-rw-r--r--drivers/dma/dw/core.c2
-rw-r--r--drivers/dma/mv_xor.c9
-rw-r--r--drivers/dma/pl330.c3
-rw-r--r--drivers/dma/virt-dma.c19
-rw-r--r--drivers/dma/virt-dma.h13
-rw-r--r--drivers/dma/xgene-dma.c3
-rw-r--r--drivers/edac/ppc4xx_edac.c2
-rw-r--r--drivers/extcon/extcon-max77693.c94
-rw-r--r--drivers/extcon/extcon-max77843.c66
-rw-r--r--drivers/extcon/extcon-palmas.c13
-rw-r--r--drivers/extcon/extcon.c61
-rw-r--r--drivers/firmware/broadcom/bcm47xx_nvram.c2
-rw-r--r--drivers/firmware/efi/cper.c15
-rw-r--r--drivers/firmware/efi/efi.c5
-rw-r--r--drivers/gpio/gpio-brcmstb.c14
-rw-r--r--drivers/gpio/gpio-davinci.c6
-rw-r--r--drivers/gpio/gpio-max732x.c1
-rw-r--r--drivers/gpio/gpio-omap.c5
-rw-r--r--drivers/gpio/gpio-pca953x.c4
-rw-r--r--drivers/gpio/gpio-xilinx.c4
-rw-r--r--drivers/gpio/gpio-zynq.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c41
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_sdma.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cz_dpm.c86
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c54
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c90
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v3_0.c48
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c35
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.c2
-rw-r--r--drivers/gpu/drm/armada/armada_gem.c5
-rw-r--r--drivers/gpu/drm/armada/armada_overlay.c121
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c1
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c14
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c10
-rw-r--r--drivers/gpu/drm/drm_crtc.c12
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c20
-rw-r--r--drivers/gpu/drm/drm_ioc32.c60
-rw-r--r--drivers/gpu/drm/drm_irq.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gsc.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c7
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c21
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c4
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h23
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c35
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c4
-rw-r--r--drivers/gpu/drm/i915/i915_gem_dmabuf.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c20
-rw-r--r--drivers/gpu/drm/i915/i915_gem_stolen.c1
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c17
-rw-r--r--drivers/gpu/drm/i915/i915_gem_userptr.c29
-rw-r--r--drivers/gpu/drm/i915/i915_ioc32.c2
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c13
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h2
-rw-r--r--drivers/gpu/drm/i915/intel_atomic.c45
-rw-r--r--drivers/gpu/drm/i915/intel_display.c67
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c35
-rw-r--r--drivers/gpu/drm/i915/intel_dp_mst.c11
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c2
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h7
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c26
-rw-r--r--drivers/gpu/drm/imx/imx-tve.c2
-rw-r--r--drivers/gpu/drm/imx/parallel-display.c21
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c4
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c13
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h2
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c33
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c87
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h1
-rw-r--r--drivers/gpu/drm/msm/msm_atomic.c8
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c13
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h4
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c2
-rw-r--r--drivers/gpu/drm/msm/msm_gem_prime.c8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_platform.c16
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c9
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fbcon.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fbcon.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fbcon.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c39
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c40
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c8
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c3
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c2
-rw-r--r--drivers/gpu/drm/radeon/dce6_afmt.c62
-rw-r--r--drivers/gpu/drm/radeon/radeon_audio.c204
-rw-r--r--drivers/gpu/drm/radeon/radeon_audio.h3
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c18
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c12
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c1
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c1
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.c1
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fb.c3
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_gem.c67
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c49
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc_dma.c13
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c4
-rw-r--r--drivers/gpu/ipu-v3/ipu-common.c3
-rw-r--r--drivers/hid/hid-apple.c6
-rw-r--r--drivers/hid/hid-core.c6
-rw-r--r--drivers/hid/hid-cp2112.c2
-rw-r--r--drivers/hid/hid-ids.h3
-rw-r--r--drivers/hid/hid-input.c7
-rw-r--r--drivers/hid/hid-multitouch.c7
-rw-r--r--drivers/hid/hid-uclogic.c2
-rw-r--r--drivers/hid/usbhid/hid-quirks.c3
-rw-r--r--drivers/hid/wacom_sys.c76
-rw-r--r--drivers/hid/wacom_wac.c3
-rw-r--r--drivers/hwmon/dell-smm-hwmon.c18
-rw-r--r--drivers/hwmon/g762.c1
-rw-r--r--drivers/hwmon/nct7802.c2
-rw-r--r--drivers/hwmon/nct7904.c58
-rw-r--r--drivers/i2c/busses/i2c-bfin-twi.c4
-rw-r--r--drivers/i2c/busses/i2c-omap.c11
-rw-r--r--drivers/i2c/i2c-core.c24
-rw-r--r--drivers/i2c/i2c-slave-eeprom.c6
-rw-r--r--drivers/iio/accel/bmc150-accel.c2
-rw-r--r--drivers/iio/accel/mma8452.c8
-rw-r--r--drivers/iio/adc/Kconfig3
-rw-r--r--drivers/iio/adc/at91_adc.c8
-rw-r--r--drivers/iio/adc/mcp320x.c2
-rw-r--r--drivers/iio/adc/rockchip_saradc.c4
-rw-r--r--drivers/iio/adc/twl4030-madc.c3
-rw-r--r--drivers/iio/adc/vf610_adc.c2
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-trigger.c11
-rw-r--r--drivers/iio/dac/ad5624r_spi.c4
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_core.c18
-rw-r--r--drivers/iio/light/Kconfig2
-rw-r--r--drivers/iio/light/cm3323.c2
-rw-r--r--drivers/iio/light/ltr501.c2
-rw-r--r--drivers/iio/light/stk3310.c75
-rw-r--r--drivers/iio/light/tcs3414.c2
-rw-r--r--drivers/iio/magnetometer/Kconfig1
-rw-r--r--drivers/iio/magnetometer/bmc150_magn.c4
-rw-r--r--drivers/iio/magnetometer/mmc35240.c47
-rw-r--r--drivers/iio/proximity/sx9500.c28
-rw-r--r--drivers/iio/temperature/mlx90614.c2
-rw-r--r--drivers/iio/temperature/tmp006.c3
-rw-r--r--drivers/infiniband/core/agent.c4
-rw-r--r--drivers/infiniband/core/cm.c61
-rw-r--r--drivers/infiniband/core/iwpm_msg.c33
-rw-r--r--drivers/infiniband/core/iwpm_util.c12
-rw-r--r--drivers/infiniband/core/iwpm_util.h28
-rw-r--r--drivers/infiniband/core/mad.c47
-rw-r--r--drivers/infiniband/core/multicast.c8
-rw-r--r--drivers/infiniband/core/opa_smi.h4
-rw-r--r--drivers/infiniband/core/sa_query.c8
-rw-r--r--drivers/infiniband/core/smi.c37
-rw-r--r--drivers/infiniband/core/smi.h4
-rw-r--r--drivers/infiniband/core/sysfs.c2
-rw-r--r--drivers/infiniband/core/ucm.c4
-rw-r--r--drivers/infiniband/core/ucma.c5
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c4
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c2
-rw-r--r--drivers/infiniband/hw/ehca/ehca_sqp.c5
-rw-r--r--drivers/infiniband/hw/ipath/ipath_driver.c6
-rw-r--r--drivers/infiniband/hw/ipath/ipath_mad.c5
-rw-r--r--drivers/infiniband/hw/ipath/ipath_verbs.c4
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c34
-rw-r--r--drivers/infiniband/hw/mlx4/main.c33
-rw-r--r--drivers/infiniband/hw/mlx5/mad.c5
-rw-r--r--drivers/infiniband/hw/mthca/mthca_mad.c5
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c5
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.c2
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma.h53
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_abi.h53
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_ah.c58
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_ah.h53
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.c53
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.h53
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_main.c56
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_sli.h53
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_stats.c53
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_stats.h53
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c53
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.h53
-rw-r--r--drivers/infiniband/hw/qib/qib_mad.c5
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h29
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c33
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c49
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c21
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_verbs.c3
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c16
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c23
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c71
-rw-r--r--drivers/input/input-leds.c16
-rw-r--r--drivers/input/joystick/turbografx.c2
-rw-r--r--drivers/input/keyboard/gpio_keys_polled.c2
-rw-r--r--drivers/input/misc/Kconfig18
-rw-r--r--drivers/input/misc/Makefile1
-rw-r--r--drivers/input/misc/axp20x-pek.c1
-rw-r--r--drivers/input/misc/max77693-haptic.c92
-rw-r--r--drivers/input/misc/max77843-haptic.c358
-rw-r--r--drivers/input/misc/twl4030-vibra.c3
-rw-r--r--drivers/input/mouse/alps.c8
-rw-r--r--drivers/input/mouse/bcm5974.c165
-rw-r--r--drivers/input/mouse/elantech.c35
-rw-r--r--drivers/input/mouse/elantech.h1
-rw-r--r--drivers/input/mouse/synaptics.c4
-rw-r--r--drivers/input/touchscreen/goodix.c36
-rw-r--r--drivers/input/touchscreen/usbtouchscreen.c3
-rw-r--r--drivers/iommu/amd_iommu.c98
-rw-r--r--drivers/iommu/amd_iommu_init.c10
-rw-r--r--drivers/iommu/amd_iommu_v2.c24
-rw-r--r--drivers/iommu/arm-smmu-v3.c60
-rw-r--r--drivers/iommu/intel-iommu.c9
-rw-r--r--drivers/irqchip/irq-crossbar.c4
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c111
-rw-r--r--drivers/irqchip/irq-mips-gic.c2
-rw-r--r--drivers/irqchip/spear-shirq.c2
-rw-r--r--drivers/isdn/gigaset/ser-gigaset.c35
-rw-r--r--drivers/leds/leds-max77693.c1
-rw-r--r--drivers/macintosh/ans-lcd.c2
-rw-r--r--drivers/md/Kconfig2
-rw-r--r--drivers/md/bcache/closure.h3
-rw-r--r--drivers/md/bcache/io.c1
-rw-r--r--drivers/md/bcache/journal.c2
-rw-r--r--drivers/md/bcache/request.c14
-rw-r--r--drivers/md/bitmap.c28
-rw-r--r--drivers/md/dm-cache-policy-mq.c2
-rw-r--r--drivers/md/dm-cache-policy-smq.c4
-rw-r--r--drivers/md/dm-cache-target.c37
-rw-r--r--drivers/md/dm-thin-metadata.c4
-rw-r--r--drivers/md/dm-thin.c53
-rw-r--r--drivers/md/dm.c39
-rw-r--r--drivers/md/md-cluster.c12
-rw-r--r--drivers/md/md-cluster.h2
-rw-r--r--drivers/md/md.c6
-rw-r--r--drivers/md/persistent-data/dm-btree-internal.h6
-rw-r--r--drivers/md/persistent-data/dm-btree-remove.c23
-rw-r--r--drivers/md/persistent-data/dm-btree-spine.c37
-rw-r--r--drivers/md/persistent-data/dm-btree.c9
-rw-r--r--drivers/md/raid1.c19
-rw-r--r--drivers/md/raid10.c5
-rw-r--r--drivers/md/raid5.c38
-rw-r--r--drivers/md/raid5.h3
-rw-r--r--drivers/media/dvb-frontends/Kconfig2
-rw-r--r--drivers/media/pci/cobalt/Kconfig1
-rw-r--r--drivers/media/pci/cobalt/cobalt-irq.c2
-rw-r--r--drivers/media/pci/ivtv/ivtvfb.c15
-rw-r--r--drivers/media/pci/mantis/mantis_dma.c5
-rw-r--r--drivers/media/rc/ir-rc5-decoder.c116
-rw-r--r--drivers/media/rc/ir-rc6-decoder.c122
-rw-r--r--drivers/media/rc/nuvoton-cir.c127
-rw-r--r--drivers/media/rc/nuvoton-cir.h1
-rw-r--r--drivers/media/rc/rc-core-priv.h36
-rw-r--r--drivers/media/rc/rc-ir-raw.c139
-rw-r--r--drivers/media/rc/rc-loopback.c36
-rw-r--r--drivers/media/rc/rc-main.c7
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c40
-rw-r--r--drivers/memory/omap-gpmc.c6
-rw-r--r--drivers/mfd/Kconfig2
-rw-r--r--drivers/mfd/arizona-core.c16
-rw-r--r--drivers/mfd/max77693.c31
-rw-r--r--drivers/mfd/max77843.c20
-rw-r--r--drivers/mfd/stmpe-i2c.c2
-rw-r--r--drivers/mfd/stmpe-spi.c4
-rw-r--r--drivers/misc/eeprom/at24.c3
-rw-r--r--drivers/misc/mei/main.c2
-rw-r--r--drivers/misc/mic/scif/scif_nodeqp.c15
-rw-r--r--drivers/mmc/card/block.c2
-rw-r--r--drivers/mmc/host/Kconfig1
-rw-r--r--drivers/mmc/host/omap_hsmmc.c11
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c210
-rw-r--r--drivers/mmc/host/sdhci-esdhc.h2
-rw-r--r--drivers/mmc/host/sdhci-pxav3.c1
-rw-r--r--drivers/mmc/host/sdhci-spear.c4
-rw-r--r--drivers/mmc/host/sdhci.c16
-rw-r--r--drivers/net/bonding/bond_main.c86
-rw-r--r--drivers/net/can/at91_can.c8
-rw-r--r--drivers/net/can/bfin_can.c6
-rw-r--r--drivers/net/can/c_can/c_can.c10
-rw-r--r--drivers/net/can/cc770/cc770.c4
-rw-r--r--drivers/net/can/dev.c7
-rw-r--r--drivers/net/can/flexcan.c7
-rw-r--r--drivers/net/can/grcan.c3
-rw-r--r--drivers/net/can/rcar_can.c16
-rw-r--r--drivers/net/can/sja1000/sja1000.c6
-rw-r--r--drivers/net/can/slcan.c4
-rw-r--r--drivers/net/can/spi/mcp251x.c17
-rw-r--r--drivers/net/can/ti_hecc.c2
-rw-r--r--drivers/net/can/usb/ems_usb.c6
-rw-r--r--drivers/net/can/usb/esd_usb2.c6
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb.c7
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_pro.c4
-rw-r--r--drivers/net/can/usb/usb_8dev.c6
-rw-r--r--drivers/net/can/vcan.c3
-rw-r--r--drivers/net/dsa/bcm_sf2.c15
-rw-r--r--drivers/net/dsa/mv88e6xxx.c2
-rw-r--r--drivers/net/ethernet/3com/3c59x.c21
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-desc.c3
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dev.c11
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c17
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe.h3
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c16
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c4
-rw-r--r--drivers/net/ethernet/broadcom/sb1250-mac.c9
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c2
-rw-r--r--drivers/net/ethernet/cadence/macb.c125
-rw-r--r--drivers/net/ethernet/cadence/macb.h34
-rw-r--r--drivers/net/ethernet/cavium/Kconfig3
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic.h12
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c26
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c55
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.c17
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.h14
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c5
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c28
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c4
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h5
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c191
-rw-r--r--drivers/net/ethernet/freescale/fec.h1
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c99
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c10
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c3
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mac-fec.c2
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c109
-rw-r--r--drivers/net/ethernet/freescale/gianfar.h3
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c350
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_main.c2
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c2
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c22
-rw-r--r--drivers/net/ethernet/marvell/mvpp2.c244
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c2
-rw-r--r--drivers/net/ethernet/micrel/ks8842.c5
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c2
-rw-r--r--drivers/net/ethernet/realtek/r8169.c4
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c74
-rw-r--r--drivers/net/ethernet/rocker/rocker.c1
-rw-r--r--drivers/net/ethernet/sfc/ef10.c172
-rw-r--r--drivers/net/ethernet/sfc/ef10_sriov.c59
-rw-r--r--drivers/net/ethernet/sfc/ef10_sriov.h6
-rw-r--r--drivers/net/ethernet/sfc/efx.c14
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h2
-rw-r--r--drivers/net/ethernet/sfc/tx.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c4
-rw-r--r--drivers/net/ethernet/sun/niu.c4
-rw-r--r--drivers/net/ethernet/ti/cpsw.c34
-rw-r--r--drivers/net/ethernet/ti/netcp.h2
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c51
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c67
-rw-r--r--drivers/net/ethernet/ti/netcp_sgmii.c30
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c8
-rw-r--r--drivers/net/hamradio/bpqether.c1
-rw-r--r--drivers/net/hamradio/mkiss.c7
-rw-r--r--drivers/net/ipvlan/ipvlan.h9
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c6
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c42
-rw-r--r--drivers/net/macvtap.c8
-rw-r--r--drivers/net/ntb_netdev.c9
-rw-r--r--drivers/net/phy/Kconfig2
-rw-r--r--drivers/net/phy/dp83867.c2
-rw-r--r--drivers/net/phy/mdio_bus.c19
-rw-r--r--drivers/net/phy/phy.c16
-rw-r--r--drivers/net/phy/smsc.c31
-rw-r--r--drivers/net/ppp/ppp_generic.c78
-rw-r--r--drivers/net/usb/cdc_ether.c8
-rw-r--r--drivers/net/usb/cdc_mbim.c2
-rw-r--r--drivers/net/usb/cdc_ncm.c63
-rw-r--r--drivers/net/usb/huawei_cdc_ncm.c7
-rw-r--r--drivers/net/usb/qmi_wwan.c2
-rw-r--r--drivers/net/usb/r8152.c191
-rw-r--r--drivers/net/virtio_net.c7
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c8
-rw-r--r--drivers/net/wan/cosa.c3
-rw-r--r--drivers/net/wan/z85230.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c1
-rw-r--r--drivers/net/wireless/b43/tables_nphy.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fh.h6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-nvm-parse.c12
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h3
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/scan.c5
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.c3
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/time-event.c2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tx.c2
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c5
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/internal.h51
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/rx.c414
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c74
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/tx.c15
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio_ops.c8
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_usb_ops.c4
-rw-r--r--drivers/net/wireless/rtlwifi/core.c7
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/sw.c1
-rw-r--r--drivers/net/xen-netback/interface.c6
-rw-r--r--drivers/net/xen-netback/netback.c68
-rw-r--r--drivers/ntb/ntb.c2
-rw-r--r--drivers/ntb/ntb_transport.c201
-rw-r--r--drivers/nvdimm/region_devs.c5
-rw-r--r--drivers/of/Kconfig2
-rw-r--r--drivers/of/unittest.c3
-rw-r--r--drivers/parport/share.c11
-rw-r--r--drivers/pci/Kconfig2
-rw-r--r--drivers/pci/probe.c7
-rw-r--r--drivers/phy/Kconfig2
-rw-r--r--drivers/phy/phy-berlin-usb.c4
-rw-r--r--drivers/phy/phy-sun4i-usb.c1
-rw-r--r--drivers/phy/phy-ti-pipe3.c217
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm2835.c2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx1-core.c3
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-abx500.c1
-rw-r--r--drivers/pinctrl/pinctrl-lpc18xx.c4
-rw-r--r--drivers/pinctrl/pinctrl-single.c3
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.c5
-rw-r--r--drivers/pinctrl/sh-pfc/sh_pfc.h2
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear.c2
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear.h2
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear1310.c4
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear1340.c4
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear300.c4
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear310.c4
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear320.c4
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear3xx.c2
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear3xx.h2
-rw-r--r--drivers/platform/chrome/Kconfig1
-rw-r--r--drivers/platform/x86/dell-laptop.c171
-rw-r--r--drivers/platform/x86/intel_pmc_ipc.c83
-rw-r--r--drivers/platform/x86/intel_scu_ipc.c6
-rw-r--r--drivers/power/max77693_charger.c1
-rw-r--r--drivers/regulator/88pm800.c236
-rw-r--r--drivers/regulator/Kconfig43
-rw-r--r--drivers/regulator/Makefile3
-rw-r--r--drivers/regulator/act8865-regulator.c1
-rw-r--r--drivers/regulator/ad5398.c1
-rw-r--r--drivers/regulator/axp20x-regulator.c1
-rw-r--r--drivers/regulator/core.c166
-rw-r--r--drivers/regulator/da9062-regulator.c1
-rw-r--r--drivers/regulator/da9210-regulator.c76
-rw-r--r--drivers/regulator/da9211-regulator.c41
-rw-r--r--drivers/regulator/da9211-regulator.h18
-rw-r--r--drivers/regulator/fan53555.c1
-rw-r--r--drivers/regulator/isl6271a-regulator.c1
-rw-r--r--drivers/regulator/isl9305.c2
-rw-r--r--drivers/regulator/lp3971.c1
-rw-r--r--drivers/regulator/lp3972.c1
-rw-r--r--drivers/regulator/lp872x.c17
-rw-r--r--drivers/regulator/ltc3589.c4
-rw-r--r--drivers/regulator/max1586.c1
-rw-r--r--drivers/regulator/max77693.c173
-rw-r--r--drivers/regulator/max77843.c201
-rw-r--r--drivers/regulator/max8660.c1
-rw-r--r--drivers/regulator/max8973-regulator.c83
-rw-r--r--drivers/regulator/mt6311-regulator.c179
-rw-r--r--drivers/regulator/mt6311-regulator.h65
-rw-r--r--drivers/regulator/of_regulator.c3
-rw-r--r--drivers/regulator/pbias-regulator.c5
-rw-r--r--drivers/regulator/pfuze100-regulator.c2
-rw-r--r--drivers/regulator/pwm-regulator.c160
-rw-r--r--drivers/regulator/qcom_smd-regulator.c350
-rw-r--r--drivers/regulator/qcom_spmi-regulator.c203
-rw-r--r--drivers/regulator/rk808-regulator.c212
-rw-r--r--drivers/regulator/s2mps11.c14
-rw-r--r--drivers/regulator/tps51632-regulator.c1
-rw-r--r--drivers/regulator/tps62360-regulator.c1
-rw-r--r--drivers/regulator/tps65023-regulator.c1
-rw-r--r--drivers/rtc/rtc-armada38x.c2
-rw-r--r--drivers/rtc/rtc-mt6397.c4
-rw-r--r--drivers/s390/Makefile2
-rw-r--r--drivers/s390/block/dasd.c36
-rw-r--r--drivers/s390/block/dasd_alias.c3
-rw-r--r--drivers/s390/char/sclp_early.c1
-rw-r--r--drivers/s390/crypto/zcrypt_api.c7
-rw-r--r--drivers/s390/virtio/Makefile (renamed from drivers/s390/kvm/Makefile)0
-rw-r--r--drivers/s390/virtio/kvm_virtio.c (renamed from drivers/s390/kvm/kvm_virtio.c)0
-rw-r--r--drivers/s390/virtio/virtio_ccw.c (renamed from drivers/s390/kvm/virtio_ccw.c)0
-rw-r--r--drivers/scsi/fnic/fnic.h2
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c4
-rw-r--r--drivers/scsi/ipr.c28
-rw-r--r--drivers/scsi/ipr.h1
-rw-r--r--drivers/scsi/libfc/fc_exch.c8
-rw-r--r--drivers/scsi/libfc/fc_fcp.c19
-rw-r--r--drivers/scsi/libiscsi.c25
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c6
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h20
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c190
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c7
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c12
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c763
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h72
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c28
-rw-r--r--drivers/scsi/scsi_error.c33
-rw-r--r--drivers/scsi/scsi_lib.c6
-rw-r--r--drivers/scsi/scsi_pm.c22
-rw-r--r--drivers/scsi/scsi_sysfs.c2
-rw-r--r--drivers/scsi/scsi_transport_srp.c3
-rw-r--r--drivers/scsi/sd.c6
-rw-r--r--drivers/scsi/st.c2
-rw-r--r--drivers/scsi/virtio_scsi.c4
-rw-r--r--drivers/spi/Kconfig2
-rw-r--r--drivers/spi/spi-img-spfi.c2
-rw-r--r--drivers/spi/spi-imx.c5
-rw-r--r--drivers/spi/spi-zynqmp-gqspi.c1
-rw-r--r--drivers/spi/spidev.c1
-rw-r--r--drivers/staging/board/Kconfig2
-rw-r--r--drivers/staging/comedi/drivers/das1800.c1
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/debug.c2
-rw-r--r--drivers/staging/vt6655/device_main.c7
-rw-r--r--drivers/staging/vt6656/main_usb.c2
-rw-r--r--drivers/target/iscsi/iscsi_target.c52
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c45
-rw-r--r--drivers/target/iscsi/iscsi_target_login.h3
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c34
-rw-r--r--drivers/target/target_core_configfs.c49
-rw-r--r--drivers/target/target_core_hba.c10
-rw-r--r--drivers/target/target_core_pr.c2
-rw-r--r--drivers/target/target_core_rd.c1
-rw-r--r--drivers/target/target_core_spc.c53
-rw-r--r--drivers/thermal/cpu_cooling.c73
-rw-r--r--drivers/thermal/hisi_thermal.c1
-rw-r--r--drivers/thermal/power_allocator.c34
-rw-r--r--drivers/thermal/samsung/Kconfig2
-rw-r--r--drivers/thermal/samsung/exynos_tmu.c5
-rw-r--r--drivers/thermal/thermal_core.c1
-rw-r--r--drivers/tty/n_tty.c16
-rw-r--r--drivers/tty/serial/Kconfig2
-rw-r--r--drivers/tty/serial/amba-pl011.c4
-rw-r--r--drivers/tty/serial/etraxfs-uart.c2
-rw-r--r--drivers/tty/serial/imx.c15
-rw-r--r--drivers/tty/serial/sc16is7xx.c30
-rw-r--r--drivers/tty/serial/serial_core.c3
-rw-r--r--drivers/tty/vt/selection.c1
-rw-r--r--drivers/tty/vt/vt.c2
-rw-r--r--drivers/usb/chipidea/core.c13
-rw-r--r--drivers/usb/chipidea/host.c7
-rw-r--r--drivers/usb/chipidea/host.h6
-rw-r--r--drivers/usb/class/cdc-acm.c1
-rw-r--r--drivers/usb/common/ulpi.c2
-rw-r--r--drivers/usb/core/hcd.c7
-rw-r--r--drivers/usb/core/hub.c2
-rw-r--r--drivers/usb/core/usb.h1
-rw-r--r--drivers/usb/dwc2/core.c55
-rw-r--r--drivers/usb/dwc2/core.h9
-rw-r--r--drivers/usb/dwc2/hcd.c55
-rw-r--r--drivers/usb/dwc2/hcd.h5
-rw-r--r--drivers/usb/dwc2/hcd_queue.c49
-rw-r--r--drivers/usb/dwc3/core.c6
-rw-r--r--drivers/usb/dwc3/ep0.c4
-rw-r--r--drivers/usb/gadget/composite.c11
-rw-r--r--drivers/usb/gadget/configfs.c2
-rw-r--r--drivers/usb/gadget/function/f_fs.c6
-rw-r--r--drivers/usb/gadget/function/f_hid.c4
-rw-r--r--drivers/usb/gadget/function/f_mass_storage.c16
-rw-r--r--drivers/usb/gadget/function/f_midi.c4
-rw-r--r--drivers/usb/gadget/function/f_printer.c10
-rw-r--r--drivers/usb/gadget/function/f_uac2.c4
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_ep.c2
-rw-r--r--drivers/usb/gadget/udc/fotg210-udc.c3
-rw-r--r--drivers/usb/gadget/udc/mv_udc_core.c2
-rw-r--r--drivers/usb/gadget/udc/udc-core.c15
-rw-r--r--drivers/usb/host/ohci-q.c7
-rw-r--r--drivers/usb/host/ohci-tmio.c2
-rw-r--r--drivers/usb/host/xhci-hub.c22
-rw-r--r--drivers/usb/host/xhci-mem.c5
-rw-r--r--drivers/usb/host/xhci-pci.c57
-rw-r--r--drivers/usb/host/xhci-ring.c5
-rw-r--r--drivers/usb/host/xhci.c3
-rw-r--r--drivers/usb/host/xhci.h1
-rw-r--r--drivers/usb/musb/musb_virthub.c4
-rw-r--r--drivers/usb/phy/phy-mxs-usb.c3
-rw-r--r--drivers/usb/serial/cp210x.c1
-rw-r--r--drivers/usb/serial/mos7720.c253
-rw-r--r--drivers/usb/serial/option.c3
-rw-r--r--drivers/usb/serial/qcserial.c2
-rw-r--r--drivers/usb/serial/sierra.c1
-rw-r--r--drivers/usb/serial/usb-serial.c1
-rw-r--r--drivers/usb/storage/unusual_devs.h23
-rw-r--r--drivers/vfio/vfio.c91
-rw-r--r--drivers/vhost/vhost.c65
-rw-r--r--drivers/video/console/fbcon.c3
-rw-r--r--drivers/video/fbdev/Kconfig2
-rw-r--r--drivers/video/fbdev/omap2/dss/dss-of.c4
-rw-r--r--drivers/video/fbdev/pxa3xx-gcu.c4
-rw-r--r--drivers/video/of_videomode.c4
-rw-r--r--drivers/virtio/virtio_input.c4
-rw-r--r--drivers/watchdog/sp805_wdt.c4
-rw-r--r--drivers/xen/balloon.c15
-rw-r--r--drivers/xen/gntdev.c2
-rw-r--r--drivers/xen/xenbus/xenbus_client.c4
-rw-r--r--fs/btrfs/dev-replace.c2
-rw-r--r--fs/btrfs/disk-io.c3
-rw-r--r--fs/btrfs/extent-tree.c31
-rw-r--r--fs/btrfs/inode.c5
-rw-r--r--fs/btrfs/ioctl.c18
-rw-r--r--fs/btrfs/qgroup.c5
-rw-r--r--fs/btrfs/transaction.c7
-rw-r--r--fs/ceph/caps.c22
-rw-r--r--fs/ceph/locks.c2
-rw-r--r--fs/ceph/super.h1
-rw-r--r--fs/configfs/item.c4
-rw-r--r--fs/dax.c14
-rw-r--r--fs/dcache.c13
-rw-r--r--fs/f2fs/data.c2
-rw-r--r--fs/f2fs/file.c7
-rw-r--r--fs/f2fs/gc.c30
-rw-r--r--fs/f2fs/inline.c2
-rw-r--r--fs/f2fs/segment.c1
-rw-r--r--fs/file_table.c24
-rw-r--r--fs/fs-writeback.c1
-rw-r--r--fs/fuse/dev.c10
-rw-r--r--fs/hugetlbfs/inode.c2
-rw-r--r--fs/jfs/file.c2
-rw-r--r--fs/jfs/inode.c4
-rw-r--r--fs/jfs/namei.c27
-rw-r--r--fs/locks.c38
-rw-r--r--fs/namei.c9
-rw-r--r--fs/namespace.c42
-rw-r--r--fs/nfs/client.c2
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayout.c2
-rw-r--r--fs/nfs/inode.c15
-rw-r--r--fs/nfs/internal.h21
-rw-r--r--fs/nfs/nfs42proc.c19
-rw-r--r--fs/nfs/nfs4proc.c54
-rw-r--r--fs/nfs/nfs4state.c29
-rw-r--r--fs/nfs/pagelist.c7
-rw-r--r--fs/nfs/pnfs.c101
-rw-r--r--fs/nfs/write.c15
-rw-r--r--fs/nfsd/nfs4layouts.c1
-rw-r--r--fs/nfsd/nfs4state.c12
-rw-r--r--fs/nfsd/nfs4xdr.c11
-rw-r--r--fs/notify/mark.c30
-rw-r--r--fs/ocfs2/aops.c4
-rw-r--r--fs/ocfs2/dlmglue.c10
-rw-r--r--fs/pnode.h2
-rw-r--r--fs/proc/Kconfig6
-rw-r--r--fs/proc/base.c5
-rw-r--r--fs/proc/kcore.c4
-rw-r--r--fs/signalfd.c5
-rw-r--r--fs/udf/inode.c19
-rw-r--r--fs/xfs/libxfs/xfs_attr_remote.c44
-rw-r--r--fs/xfs/xfs_file.c21
-rw-r--r--fs/xfs/xfs_log_recover.c11
-rw-r--r--include/asm-generic/mm-arch-hooks.h16
-rw-r--r--include/drm/drmP.h2
-rw-r--r--include/drm/drm_crtc.h2
-rw-r--r--include/drm/drm_crtc_helper.h3
-rw-r--r--include/drm/drm_edid.h19
-rw-r--r--include/drm/drm_pciids.h1
-rw-r--r--include/linux/amba/sp810.h2
-rw-r--r--include/linux/ata.h19
-rw-r--r--include/linux/blk-cgroup.h11
-rw-r--r--include/linux/can/skb.h2
-rw-r--r--include/linux/clkdev.h7
-rw-r--r--include/linux/compat.h2
-rw-r--r--include/linux/configfs.h3
-rw-r--r--include/linux/cper.h22
-rw-r--r--include/linux/cpu.h7
-rw-r--r--include/linux/cpufreq.h1
-rw-r--r--include/linux/dcache.h3
-rw-r--r--include/linux/device.h15
-rw-r--r--include/linux/fs.h35
-rw-r--r--include/linux/ftrace.h3
-rw-r--r--include/linux/gpio/driver.h2
-rw-r--r--include/linux/hid-sensor-hub.h1
-rw-r--r--include/linux/hugetlb.h17
-rw-r--r--include/linux/init.h78
-rw-r--r--include/linux/iommu.h2
-rw-r--r--include/linux/irq.h1
-rw-r--r--include/linux/kernel.h9
-rw-r--r--include/linux/kobject.h5
-rw-r--r--include/linux/kvm_host.h18
-rw-r--r--include/linux/libata.h2
-rw-r--r--include/linux/mfd/max77693-common.h49
-rw-r--r--include/linux/mfd/max77693-private.h134
-rw-r--r--include/linux/mfd/max77843-private.h174
-rw-r--r--include/linux/mm.h28
-rw-r--r--include/linux/mm_types.h9
-rw-r--r--include/linux/mmiotrace.h2
-rw-r--r--include/linux/module.h84
-rw-r--r--include/linux/mtd/nand.h10
-rw-r--r--include/linux/nfs_fs.h7
-rw-r--r--include/linux/nfs_fs_sb.h2
-rw-r--r--include/linux/of_device.h2
-rw-r--r--include/linux/page-flags.h10
-rw-r--r--include/linux/page_owner.h13
-rw-r--r--include/linux/pata_arasan_cf_data.h2
-rw-r--r--include/linux/platform_data/macb.h14
-rw-r--r--include/linux/platform_data/mmc-esdhc-imx.h1
-rw-r--r--include/linux/printk.h6
-rw-r--r--include/linux/regulator/consumer.h16
-rw-r--r--include/linux/regulator/da9211.h19
-rw-r--r--include/linux/regulator/driver.h1
-rw-r--r--include/linux/regulator/machine.h1
-rw-r--r--include/linux/regulator/mt6311.h29
-rw-r--r--include/linux/sched.h16
-rw-r--r--include/linux/skbuff.h20
-rw-r--r--include/linux/usb/cdc_ncm.h7
-rw-r--r--include/media/rc-core.h7
-rw-r--r--include/media/videobuf2-core.h2
-rw-r--r--include/net/act_api.h8
-rw-r--r--include/net/cfg80211.h17
-rw-r--r--include/net/inet_frag.h17
-rw-r--r--include/net/ip.h1
-rw-r--r--include/net/ip_fib.h3
-rw-r--r--include/net/netfilter/nf_conntrack.h2
-rw-r--r--include/net/netns/conntrack.h1
-rw-r--r--include/net/sock.h2
-rw-r--r--include/rdma/ib_verbs.h20
-rw-r--r--include/scsi/scsi_eh.h1
-rw-r--r--include/scsi/scsi_transport_srp.h1
-rw-r--r--include/sound/soc-topology.h12
-rw-r--r--include/target/iscsi/iscsi_target_core.h1
-rw-r--r--include/uapi/drm/amdgpu_drm.h4
-rw-r--r--include/uapi/drm/i915_drm.h8
-rw-r--r--include/uapi/drm/radeon_drm.h2
-rw-r--r--include/uapi/linux/netconf.h1
-rw-r--r--include/uapi/linux/pci_regs.h1
-rw-r--r--include/uapi/linux/virtio_net.h16
-rw-r--r--include/uapi/linux/virtio_pci.h6
-rw-r--r--include/uapi/linux/virtio_ring.h5
-rw-r--r--include/uapi/sound/asoc.h45
-rw-r--r--init/main.c2
-rw-r--r--ipc/mqueue.c5
-rw-r--r--ipc/sem.c47
-rw-r--r--ipc/shm.c2
-rw-r--r--kernel/cpu.c9
-rw-r--r--kernel/cpuset.c2
-rw-r--r--kernel/events/core.c91
-rw-r--r--kernel/events/ring_buffer.c10
-rw-r--r--kernel/fork.c7
-rw-r--r--kernel/irq/chip.c19
-rw-r--r--kernel/irq/resend.c18
-rw-r--r--kernel/kthread.c4
-rw-r--r--kernel/locking/qspinlock_paravirt.h11
-rw-r--r--kernel/module.c8
-rw-r--r--kernel/resource.c6
-rw-r--r--kernel/sched/fair.c2
-rw-r--r--kernel/signal.c13
-rw-r--r--kernel/time/tick-broadcast.c1
-rw-r--r--kernel/time/tick-common.c1
-rw-r--r--kernel/time/timer.c4
-rw-r--r--kernel/trace/ftrace.c52
-rw-r--r--kernel/trace/trace.h1
-rw-r--r--kernel/trace/trace_branch.c17
-rw-r--r--lib/decompress.c5
-rw-r--r--lib/dma-debug.c3
-rw-r--r--lib/hexdump.c7
-rw-r--r--lib/iommu-common.c2
-rw-r--r--lib/kobject.c5
-rw-r--r--lib/rhashtable.c4
-rw-r--r--mm/cma.h2
-rw-r--r--mm/cma_debug.c11
-rw-r--r--mm/huge_memory.c7
-rw-r--r--mm/kasan/kasan.c2
-rw-r--r--mm/kasan/report.c2
-rw-r--r--mm/memory-failure.c54
-rw-r--r--mm/memory_hotplug.c13
-rw-r--r--mm/migrate.c8
-rw-r--r--mm/page-writeback.c4
-rw-r--r--mm/page_alloc.c76
-rw-r--r--mm/page_owner.c7
-rw-r--r--mm/shmem.c4
-rw-r--r--mm/slab.c4
-rw-r--r--mm/slab_common.c3
-rw-r--r--mm/slub.c2
-rw-r--r--mm/vmscan.c16
-rw-r--r--net/9p/client.c2
-rw-r--r--net/9p/trans_virtio.c1
-rw-r--r--net/ax25/ax25_subr.c1
-rw-r--r--net/batman-adv/distributed-arp-table.c18
-rw-r--r--net/batman-adv/gateway_client.c2
-rw-r--r--net/batman-adv/soft-interface.c3
-rw-r--r--net/batman-adv/translation-table.c32
-rw-r--r--net/bluetooth/mgmt.c2
-rw-r--r--net/bluetooth/smp.c4
-rw-r--r--net/bridge/br_forward.c28
-rw-r--r--net/bridge/br_mdb.c18
-rw-r--r--net/bridge/br_multicast.c91
-rw-r--r--net/bridge/br_netfilter_hooks.c16
-rw-r--r--net/bridge/br_netfilter_ipv6.c2
-rw-r--r--net/bridge/br_netlink.c16
-rw-r--r--net/bridge/br_stp.c5
-rw-r--r--net/bridge/br_stp_if.c13
-rw-r--r--net/bridge/br_stp_timer.c4
-rw-r--r--net/caif/caif_socket.c19
-rw-r--r--net/can/af_can.c12
-rw-r--r--net/can/bcm.c2
-rw-r--r--net/can/raw.c7
-rw-r--r--net/core/datagram.c57
-rw-r--r--net/core/dev.c45
-rw-r--r--net/core/dst.c4
-rw-r--r--net/core/gen_estimator.c13
-rw-r--r--net/core/netclassid_cgroup.c3
-rw-r--r--net/core/pktgen.c12
-rw-r--r--net/core/request_sock.c8
-rw-r--r--net/core/rtnetlink.c198
-rw-r--r--net/core/skbuff.c39
-rw-r--r--net/core/sock.c8
-rw-r--r--net/dccp/proto.c2
-rw-r--r--net/dsa/dsa.c6
-rw-r--r--net/dsa/slave.c3
-rw-r--r--net/ieee802154/6lowpan/reassembly.c6
-rw-r--r--net/ipv4/arp.c16
-rw-r--r--net/ipv4/datagram.c16
-rw-r--r--net/ipv4/devinet.c14
-rw-r--r--net/ipv4/fib_lookup.h1
-rw-r--r--net/ipv4/fib_semantics.c41
-rw-r--r--net/ipv4/fib_trie.c9
-rw-r--r--net/ipv4/igmp.c33
-rw-r--r--net/ipv4/inet_connection_sock.c2
-rw-r--r--net/ipv4/inet_diag.c4
-rw-r--r--net/ipv4/inet_fragment.c40
-rw-r--r--net/ipv4/inet_hashtables.c11
-rw-r--r--net/ipv4/ip_fragment.c18
-rw-r--r--net/ipv4/ip_tunnel.c8
-rw-r--r--net/ipv4/netfilter/arp_tables.c25
-rw-r--r--net/ipv4/netfilter/ipt_SYNPROXY.c3
-rw-r--r--net/ipv4/route.c2
-rw-r--r--net/ipv4/sysctl_net_ipv4.c10
-rw-r--r--net/ipv4/tcp.c11
-rw-r--r--net/ipv4/tcp_input.c3
-rw-r--r--net/ipv4/tcp_ipv4.c2
-rw-r--r--net/ipv4/udp.c13
-rw-r--r--net/ipv6/datagram.c20
-rw-r--r--net/ipv6/ip6_fib.c2
-rw-r--r--net/ipv6/ip6_input.c6
-rw-r--r--net/ipv6/ip6_offload.c2
-rw-r--r--net/ipv6/mcast_snoop.c33
-rw-r--r--net/ipv6/ndisc.c6
-rw-r--r--net/ipv6/netfilter/ip6t_SYNPROXY.c19
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c6
-rw-r--r--net/ipv6/reassembly.c8
-rw-r--r--net/ipv6/route.c90
-rw-r--r--net/ipv6/tcp_ipv6.c2
-rw-r--r--net/llc/af_llc.c4
-rw-r--r--net/mac80211/debugfs_netdev.c1
-rw-r--r--net/mac80211/iface.c25
-rw-r--r--net/mac80211/mesh_plink.c5
-rw-r--r--net/mac80211/pm.c16
-rw-r--r--net/mac80211/rc80211_minstrel.c11
-rw-r--r--net/mac80211/tdls.c6
-rw-r--r--net/mac80211/tx.c4
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c16
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c78
-rw-r--r--net/netfilter/ipvs/ip_vs_sched.c12
-rw-r--r--net/netfilter/ipvs/ip_vs_sync.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_xmit.c41
-rw-r--r--net/netfilter/nf_conntrack_core.c71
-rw-r--r--net/netfilter/nf_conntrack_expect.c3
-rw-r--r--net/netfilter/nf_conntrack_netlink.c5
-rw-r--r--net/netfilter/nf_queue.c2
-rw-r--r--net/netfilter/nf_synproxy_core.c11
-rw-r--r--net/netfilter/nfnetlink.c38
-rw-r--r--net/netfilter/xt_CT.c13
-rw-r--r--net/netfilter/xt_IDLETIMER.c1
-rw-r--r--net/netlink/af_netlink.c86
-rw-r--r--net/openvswitch/actions.c16
-rw-r--r--net/openvswitch/flow_table.c2
-rw-r--r--net/packet/af_packet.c11
-rw-r--r--net/rds/ib_rdma.c4
-rw-r--r--net/rds/info.c2
-rw-r--r--net/rds/transport.c2
-rw-r--r--net/sched/act_api.c11
-rw-r--r--net/sched/act_bpf.c50
-rw-r--r--net/sched/act_mirred.c2
-rw-r--r--net/sched/act_pedit.c5
-rw-r--r--net/sched/cls_bpf.c2
-rw-r--r--net/sched/cls_flow.c5
-rw-r--r--net/sched/cls_flower.c2
-rw-r--r--net/sched/sch_choke.c13
-rw-r--r--net/sched/sch_fq_codel.c35
-rw-r--r--net/sched/sch_plug.c1
-rw-r--r--net/sched/sch_sfq.c2
-rw-r--r--net/sctp/socket.c6
-rw-r--r--net/sunrpc/backchannel_rqst.c6
-rw-r--r--net/sunrpc/clnt.c5
-rw-r--r--net/sunrpc/xprtsock.c25
-rw-r--r--net/switchdev/switchdev.c12
-rw-r--r--net/tipc/socket.c1
-rw-r--r--net/wireless/chan.c45
-rw-r--r--net/wireless/nl80211.c14
-rw-r--r--net/wireless/reg.c8
-rw-r--r--net/wireless/trace.h11
-rw-r--r--samples/trace_events/trace-events-sample.h7
-rwxr-xr-xscripts/checkpatch.pl2
-rwxr-xr-xscripts/kconfig/streamline_config.pl2
-rw-r--r--security/keys/keyring.c8
-rw-r--r--security/yama/yama_lsm.c1
-rw-r--r--sound/core/pcm_native.c2
-rw-r--r--sound/firewire/amdtp.c5
-rw-r--r--sound/firewire/amdtp.h2
-rw-r--r--sound/firewire/fireworks/fireworks.c8
-rw-r--r--sound/firewire/fireworks/fireworks.h1
-rw-r--r--sound/firewire/fireworks/fireworks_stream.c9
-rw-r--r--sound/hda/ext/hdac_ext_controller.c6
-rw-r--r--sound/hda/ext/hdac_ext_stream.c2
-rw-r--r--sound/hda/hdac_i915.c5
-rw-r--r--sound/pci/hda/hda_generic.c2
-rw-r--r--sound/pci/hda/hda_intel.c32
-rw-r--r--sound/pci/hda/patch_cirrus.c4
-rw-r--r--sound/pci/hda/patch_hdmi.c4
-rw-r--r--sound/pci/hda/patch_realtek.c137
-rw-r--r--sound/pci/hda/patch_sigmatel.c3
-rw-r--r--sound/pci/oxygen/oxygen_mixer.c2
-rw-r--r--sound/soc/Kconfig3
-rw-r--r--sound/soc/Makefile3
-rw-r--r--sound/soc/codecs/cs4265.c10
-rw-r--r--sound/soc/codecs/pcm1681.c2
-rw-r--r--sound/soc/codecs/rt5645.c5
-rw-r--r--sound/soc/codecs/rt5645.h4
-rw-r--r--sound/soc/codecs/sgtl5000.h2
-rw-r--r--sound/soc/codecs/ssm4567.c8
-rw-r--r--sound/soc/fsl/fsl_ssi.c2
-rw-r--r--sound/soc/intel/Makefile2
-rw-r--r--sound/soc/intel/atom/sst/sst_drv_interface.c14
-rw-r--r--sound/soc/intel/baytrail/sst-baytrail-ipc.c2
-rw-r--r--sound/soc/intel/boards/cht_bsw_max98090_ti.c4
-rw-r--r--sound/soc/intel/haswell/sst-haswell-ipc.c2
-rw-r--r--sound/soc/mediatek/mt8173-max98090.c17
-rw-r--r--sound/soc/mediatek/mt8173-rt5650-rt5676.c19
-rw-r--r--sound/soc/mediatek/mtk-afe-pcm.c2
-rw-r--r--sound/soc/soc-core.c1
-rw-r--r--sound/soc/soc-dapm.c35
-rw-r--r--sound/soc/soc-topology.c62
-rw-r--r--sound/soc/zte/zx296702-i2s.c4
-rw-r--r--sound/soc/zte/zx296702-spdif.c4
-rw-r--r--sound/sparc/amd7930.c1
-rw-r--r--sound/usb/card.c2
-rw-r--r--sound/usb/line6/pcm.c9
-rw-r--r--sound/usb/mixer_maps.c24
-rw-r--r--sound/usb/quirks-table.h68
-rw-r--r--tools/lib/api/Makefile2
-rw-r--r--tools/lib/hweight.c62
-rw-r--r--tools/lib/traceevent/Makefile2
-rw-r--r--tools/perf/MANIFEST2
-rw-r--r--tools/perf/Makefile.perf19
-rw-r--r--tools/perf/builtin-record.c11
-rw-r--r--tools/perf/builtin-stat.c4
-rw-r--r--tools/perf/builtin-top.c4
-rw-r--r--tools/perf/config/Makefile2
-rw-r--r--tools/perf/ui/browsers/hists.c2
-rw-r--r--tools/perf/util/Build2
-rw-r--r--tools/perf/util/auxtrace.c10
-rw-r--r--tools/perf/util/machine.c20
-rw-r--r--tools/perf/util/python-ext-sources4
-rw-r--r--tools/perf/util/stat-shadow.c8
-rw-r--r--tools/perf/util/symbol.c2
-rw-r--r--tools/perf/util/symbol.h3
-rw-r--r--tools/perf/util/thread.c6
-rw-r--r--tools/perf/util/thread_map.c3
-rw-r--r--tools/perf/util/vdso.c8
-rw-r--r--tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c2
-rw-r--r--virt/kvm/vfio.c5
1427 files changed, 16471 insertions, 10646 deletions
diff --git a/.get_maintainer.ignore b/.get_maintainer.ignore
new file mode 100644
index 000000000000..cca6d870f7a5
--- /dev/null
+++ b/.get_maintainer.ignore
@@ -0,0 +1 @@
Christoph Hellwig <hch@lst.de>
diff --git a/.mailmap b/.mailmap
index 977f958eedbe..4b31af54ccd5 100644
--- a/.mailmap
+++ b/.mailmap
@@ -17,6 +17,7 @@ Aleksey Gorelov <aleksey_gorelov@phoenix.com>
 Al Viro <viro@ftp.linux.org.uk>
 Al Viro <viro@zenIV.linux.org.uk>
 Andreas Herrmann <aherrman@de.ibm.com>
+Andrey Ryabinin <ryabinin.a.a@gmail.com> <a.ryabinin@samsung.com>
 Andrew Morton <akpm@linux-foundation.org>
 Andrew Vasquez <andrew.vasquez@qlogic.com>
 Andy Adamson <andros@citi.umich.edu>
@@ -116,6 +117,7 @@ Shiraz Hashim <shiraz.linux.kernel@gmail.com> <shiraz.hashim@st.com>
 Simon Kelley <simon@thekelleys.org.uk>
 Stéphane Witzmann <stephane.witzmann@ubpmes.univ-bpclermont.fr>
 Stephen Hemminger <shemminger@osdl.org>
+Sudeep Holla <sudeep.holla@arm.com> Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
 Sumit Semwal <sumit.semwal@ti.com>
 Tejun Heo <htejun@gmail.com>
 Thomas Graf <tgraf@suug.ch>
@@ -125,7 +127,9 @@ Uwe Kleine-König <ukleinek@informatik.uni-freiburg.de>
 Uwe Kleine-König <ukl@pengutronix.de>
 Uwe Kleine-König <Uwe.Kleine-Koenig@digi.com>
 Valdis Kletnieks <Valdis.Kletnieks@vt.edu>
-Viresh Kumar <viresh.linux@gmail.com> <viresh.kumar@st.com>
+Viresh Kumar <vireshk@kernel.org> <viresh.kumar@st.com>
+Viresh Kumar <vireshk@kernel.org> <viresh.linux@gmail.com>
+Viresh Kumar <vireshk@kernel.org> <viresh.kumar2@arm.com>
 Takashi YOSHII <takashi.yoshii.zj@renesas.com>
 Yusuke Goda <goda.yusuke@renesas.com>
 Gustavo Padovan <gustavo@las.ic.unicamp.br>
diff --git a/Documentation/ABI/testing/sysfs-bus-iio b/Documentation/ABI/testing/sysfs-bus-iio
index bbed111c31b4..70c9b1ac66db 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio
+++ b/Documentation/ABI/testing/sysfs-bus-iio
@@ -1234,10 +1234,8 @@ Description:
 		object is near the sensor, usually be observing
 		reflectivity of infrared or ultrasound emitted.
 		Often these sensors are unit less and as such conversion
-		to SI units is not possible. Where it is, the units should
-		be meters. If such a conversion is not possible, the reported
-		values should behave in the same way as a distance, i.e. lower
-		values indicate something is closer to the sensor.
+		to SI units is not possible. Higher proximity measurements
+		indicate closer objects, and vice versa.
 
 What:		/sys/.../iio:deviceX/in_illuminance_input
 What:		/sys/.../iio:deviceX/in_illuminance_raw
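
The reworded proximity text above flips the reported scale from a distance-like value (lower = closer) to the opposite convention (higher = closer). A small userspace sketch of reading one sample, assuming a hypothetical iio:device0 path, shows how the new wording is meant to be interpreted:

	#include <stdio.h>

	int main(void)
	{
		/* Placeholder path; the real device index is system-specific. */
		const char *path = "/sys/bus/iio/devices/iio:device0/in_proximity_raw";
		FILE *f = fopen(path, "r");
		long raw;

		if (!f) {
			perror(path);
			return 1;
		}
		if (fscanf(f, "%ld", &raw) != 1) {
			fclose(f);
			fprintf(stderr, "could not parse %s\n", path);
			return 1;
		}
		fclose(f);
		/* Per the updated ABI text: larger values mean a closer object. */
		printf("proximity: %ld (higher = closer)\n", raw);
		return 0;
	}
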
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
index c0312cbd023d..2fb9a5457522 100644
--- a/Documentation/DocBook/drm.tmpl
+++ b/Documentation/DocBook/drm.tmpl
@@ -3383,7 +3383,7 @@ void intel_crt_init(struct drm_device *dev)
 	<td valign="top" >TBD</td>
 	</tr>
 	<tr>
-	<td rowspan="2" valign="top" >omap</td>
+	<td valign="top" >omap</td>
 	<td valign="top" >Generic</td>
 	<td valign="top" >“zorder”</td>
 	<td valign="top" >RANGE</td>
diff --git a/Documentation/arm/SPEAr/overview.txt b/Documentation/arm/SPEAr/overview.txt
index 65610bf52ebf..1b049be6c84f 100644
--- a/Documentation/arm/SPEAr/overview.txt
+++ b/Documentation/arm/SPEAr/overview.txt
@@ -60,4 +60,4 @@ Introduction
 Document Author
 ---------------
 
-Viresh Kumar <viresh.linux@gmail.com>, (c) 2010-2012 ST Microelectronics
+Viresh Kumar <vireshk@kernel.org>, (c) 2010-2012 ST Microelectronics
diff --git a/Documentation/device-mapper/cache.txt b/Documentation/device-mapper/cache.txt
index 82960cffbad3..785eab87aa71 100644
--- a/Documentation/device-mapper/cache.txt
+++ b/Documentation/device-mapper/cache.txt
@@ -258,6 +258,12 @@ cache metadata mode : ro if read-only, rw if read-write
 	no further I/O will be permitted and the status will just
 	contain the string 'Fail'. The userspace recovery tools
 	should then be used.
+needs_check : 'needs_check' if set, '-' if not set
+	A metadata operation has failed, resulting in the needs_check
+	flag being set in the metadata's superblock. The metadata
+	device must be deactivated and checked/repaired before the
+	cache can be made fully operational again. '-' indicates
+	needs_check is not set.
 
 Messages
 --------
diff --git a/Documentation/device-mapper/thin-provisioning.txt b/Documentation/device-mapper/thin-provisioning.txt
index 4f67578b2954..1699a55b7b70 100644
--- a/Documentation/device-mapper/thin-provisioning.txt
+++ b/Documentation/device-mapper/thin-provisioning.txt
@@ -296,7 +296,7 @@ ii) Status
 	underlying device. When this is enabled when loading the table,
 	it can get disabled if the underlying device doesn't support it.
 
-    ro|rw
+    ro|rw|out_of_data_space
 	If the pool encounters certain types of device failures it will
 	drop into a read-only metadata mode in which no changes to
 	the pool metadata (like allocating new blocks) are permitted.
@@ -314,6 +314,13 @@ ii) Status
 	module parameter can be used to change this timeout -- it
 	defaults to 60 seconds but may be disabled using a value of 0.
 
+    needs_check
+	A metadata operation has failed, resulting in the needs_check
+	flag being set in the metadata's superblock. The metadata
+	device must be deactivated and checked/repaired before the
+	thin-pool can be made fully operational again. '-' indicates
+	needs_check is not set.
+
 iii) Messages
 
     create_thin <dev id>
diff --git a/Documentation/devicetree/bindings/arm/cpus.txt b/Documentation/devicetree/bindings/arm/cpus.txt
index d6b794cef0b8..91e6e5c478d0 100644
--- a/Documentation/devicetree/bindings/arm/cpus.txt
+++ b/Documentation/devicetree/bindings/arm/cpus.txt
@@ -199,6 +199,7 @@ nodes to be present and contain the properties described below.
199 "qcom,kpss-acc-v1" 199 "qcom,kpss-acc-v1"
200 "qcom,kpss-acc-v2" 200 "qcom,kpss-acc-v2"
201 "rockchip,rk3066-smp" 201 "rockchip,rk3066-smp"
202 "ste,dbx500-smp"
202 203
203 - cpu-release-addr 204 - cpu-release-addr
204 Usage: required for systems that have an "enable-method" 205 Usage: required for systems that have an "enable-method"
diff --git a/Documentation/devicetree/bindings/dma/apm-xgene-dma.txt b/Documentation/devicetree/bindings/dma/apm-xgene-dma.txt
index d3058768b23d..c53e0b08032f 100644
--- a/Documentation/devicetree/bindings/dma/apm-xgene-dma.txt
+++ b/Documentation/devicetree/bindings/dma/apm-xgene-dma.txt
@@ -35,7 +35,7 @@ Example:
 		device_type = "dma";
 		reg = <0x0 0x1f270000 0x0 0x10000>,
 		      <0x0 0x1f200000 0x0 0x10000>,
-		      <0x0 0x1b008000 0x0 0x2000>,
+		      <0x0 0x1b000000 0x0 0x400000>,
 		      <0x0 0x1054a000 0x0 0x100>;
 		interrupts = <0x0 0x82 0x4>,
 			     <0x0 0xb8 0x4>,
diff --git a/Documentation/devicetree/bindings/drm/imx/fsl-imx-drm.txt b/Documentation/devicetree/bindings/drm/imx/fsl-imx-drm.txt
index e75f0e549fff..971c3eedb1c7 100644
--- a/Documentation/devicetree/bindings/drm/imx/fsl-imx-drm.txt
+++ b/Documentation/devicetree/bindings/drm/imx/fsl-imx-drm.txt
@@ -65,8 +65,10 @@ Optional properties:
65- edid: verbatim EDID data block describing attached display. 65- edid: verbatim EDID data block describing attached display.
66- ddc: phandle describing the i2c bus handling the display data 66- ddc: phandle describing the i2c bus handling the display data
67 channel 67 channel
68- port: A port node with endpoint definitions as defined in 68- port@[0-1]: Port nodes with endpoint definitions as defined in
69 Documentation/devicetree/bindings/media/video-interfaces.txt. 69 Documentation/devicetree/bindings/media/video-interfaces.txt.
70 Port 0 is the input port connected to the IPU display interface,
71 port 1 is the output port connected to a panel.
70 72
71example: 73example:
72 74
@@ -75,9 +77,29 @@ display@di0 {
75 edid = [edid-data]; 77 edid = [edid-data];
76 interface-pix-fmt = "rgb24"; 78 interface-pix-fmt = "rgb24";
77 79
78 port { 80 port@0 {
81 reg = <0>;
82
79 display_in: endpoint { 83 display_in: endpoint {
80 remote-endpoint = <&ipu_di0_disp0>; 84 remote-endpoint = <&ipu_di0_disp0>;
81 }; 85 };
82 }; 86 };
87
88 port@1 {
89 reg = <1>;
90
91 display_out: endpoint {
92 remote-endpoint = <&panel_in>;
93 };
94 };
95};
96
97panel {
98 ...
99
100 port {
101 panel_in: endpoint {
102 remote-endpoint = <&display_out>;
103 };
104 };
83}; 105};
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt b/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt
index c03eec116872..3443e0f838df 100644
--- a/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt
@@ -35,3 +35,6 @@ the PCIe specification.
35 35
36 NOTE: this only applies to the SMMU itself, not 36 NOTE: this only applies to the SMMU itself, not
37 masters connected upstream of the SMMU. 37 masters connected upstream of the SMMU.
38
39- hisilicon,broken-prefetch-cmd
40 : Avoid sending CMD_PREFETCH_* commands to the SMMU.
diff --git a/Documentation/devicetree/bindings/mfd/rk808.txt b/Documentation/devicetree/bindings/mfd/rk808.txt
index 9e6e2592e5c8..4ca6aab4273a 100644
--- a/Documentation/devicetree/bindings/mfd/rk808.txt
+++ b/Documentation/devicetree/bindings/mfd/rk808.txt
@@ -24,6 +24,10 @@ Optional properties:
24- vcc10-supply: The input supply for LDO_REG6 24- vcc10-supply: The input supply for LDO_REG6
25- vcc11-supply: The input supply for LDO_REG8 25- vcc11-supply: The input supply for LDO_REG8
26- vcc12-supply: The input supply for SWITCH_REG2 26- vcc12-supply: The input supply for SWITCH_REG2
27- dvs-gpios: buck1/2 can be controlled by gpio dvs, this is GPIO specifiers
28 for 2 host gpio's used for dvs. The format of the gpio specifier depends in
29 the gpio controller. If DVS GPIOs aren't present, voltage changes will happen
30 very quickly with no slow ramp time.
27 31
28Regulators: All the regulators of RK808 to be instantiated shall be 32Regulators: All the regulators of RK808 to be instantiated shall be
29listed in a child node named 'regulators'. Each regulator is represented 33listed in a child node named 'regulators'. Each regulator is represented
@@ -55,7 +59,9 @@ Example:
55 interrupt-parent = <&gpio0>; 59 interrupt-parent = <&gpio0>;
56 interrupts = <4 IRQ_TYPE_LEVEL_LOW>; 60 interrupts = <4 IRQ_TYPE_LEVEL_LOW>;
57 pinctrl-names = "default"; 61 pinctrl-names = "default";
58 pinctrl-0 = <&pmic_int>; 62 pinctrl-0 = <&pmic_int &dvs_1 &dvs_2>;
63 dvs-gpios = <&gpio7 11 GPIO_ACTIVE_HIGH>,
64 <&gpio7 15 GPIO_ACTIVE_HIGH>;
59 reg = <0x1b>; 65 reg = <0x1b>;
60 rockchip,system-power-controller; 66 rockchip,system-power-controller;
61 wakeup-source; 67 wakeup-source;
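
The dvs-gpios pair documented above is consumed by the PMIC driver at probe time. A minimal sketch, assuming the generic gpiod consumer API (this is not the rk808 driver's actual code and the helper name is made up), of how the two optional DVS lines could be requested:

	#include <linux/err.h>
	#include <linux/gpio/consumer.h>

	static int example_get_dvs_gpios(struct device *dev, struct gpio_desc *dvs[2])
	{
		int i;

		for (i = 0; i < 2; i++) {
			/* "dvs" maps to the dvs-gpios property; absent GPIOs return NULL */
			dvs[i] = devm_gpiod_get_index_optional(dev, "dvs", i,
							       GPIOD_OUT_LOW);
			if (IS_ERR(dvs[i]))
				return PTR_ERR(dvs[i]);
		}
		/* With no DVS GPIOs the binding notes that voltage changes apply
		 * immediately, with no slow ramp. */
		return 0;
	}
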
diff --git a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
index 5d0376b8f202..211e7785f4d2 100644
--- a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
+++ b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
@@ -17,7 +17,6 @@ Required properties:
17 "fsl,imx6sx-usdhc" 17 "fsl,imx6sx-usdhc"
18 18
19Optional properties: 19Optional properties:
20- fsl,cd-controller : Indicate to use controller internal card detection
21- fsl,wp-controller : Indicate to use controller internal write protection 20- fsl,wp-controller : Indicate to use controller internal write protection
22- fsl,delay-line : Specify the number of delay cells for override mode. 21- fsl,delay-line : Specify the number of delay cells for override mode.
23 This is used to set the clock delay for DLL(Delay Line) on override mode 22 This is used to set the clock delay for DLL(Delay Line) on override mode
@@ -35,7 +34,6 @@ esdhc@70004000 {
 	compatible = "fsl,imx51-esdhc";
 	reg = <0x70004000 0x4000>;
 	interrupts = <1>;
-	fsl,cd-controller;
 	fsl,wp-controller;
 };
 
diff --git a/Documentation/devicetree/bindings/phy/ti-phy.txt b/Documentation/devicetree/bindings/phy/ti-phy.txt
index 305e3df3d9b1..9cf9446eaf2e 100644
--- a/Documentation/devicetree/bindings/phy/ti-phy.txt
+++ b/Documentation/devicetree/bindings/phy/ti-phy.txt
@@ -82,6 +82,9 @@ Optional properties:
82 - id: If there are multiple instance of the same type, in order to 82 - id: If there are multiple instance of the same type, in order to
83 differentiate between each instance "id" can be used (e.g., multi-lane PCIe 83 differentiate between each instance "id" can be used (e.g., multi-lane PCIe
84 PHY). If "id" is not provided, it is set to default value of '1'. 84 PHY). If "id" is not provided, it is set to default value of '1'.
85 - syscon-pllreset: Handle to system control region that contains the
86 CTRL_CORE_SMA_SW_0 register and register offset to the CTRL_CORE_SMA_SW_0
87 register that contains the SATA_PLL_SOFT_RESET bit. Only valid for sata_phy.
85 88
86This is usually a subnode of ocp2scp to which it is connected. 89This is usually a subnode of ocp2scp to which it is connected.
87 90
@@ -100,3 +103,16 @@ usb3phy@4a084400 {
100 "sysclk", 103 "sysclk",
101 "refclk"; 104 "refclk";
102}; 105};
106
107sata_phy: phy@4A096000 {
108 compatible = "ti,phy-pipe3-sata";
109 reg = <0x4A096000 0x80>, /* phy_rx */
110 <0x4A096400 0x64>, /* phy_tx */
111 <0x4A096800 0x40>; /* pll_ctrl */
112 reg-names = "phy_rx", "phy_tx", "pll_ctrl";
113 ctrl-module = <&omap_control_sata>;
114 clocks = <&sys_clkin1>, <&sata_ref_clk>;
115 clock-names = "sysclk", "refclk";
116 syscon-pllreset = <&scm_conf 0x3fc>;
117 #phy-cells = <0>;
118};
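
syscon-pllreset is a <phandle, register offset> pair, so a consumer needs both the syscon regmap and the CTRL_CORE_SMA_SW_0 offset before it can toggle SATA_PLL_SOFT_RESET. A hedged sketch of that lookup (not the phy-ti-pipe3 driver itself; the bit position used here is an assumption for the example):

	#include <linux/bitops.h>
	#include <linux/err.h>
	#include <linux/mfd/syscon.h>
	#include <linux/of.h>
	#include <linux/regmap.h>

	#define EXAMPLE_SATA_PLL_SOFT_RESET	BIT(18)	/* assumed bit for this sketch */

	static int example_sata_pll_soft_reset(struct device_node *np, bool assert)
	{
		struct regmap *syscon;
		u32 offset;
		int ret;

		/* cell 0 of the property is the syscon phandle ... */
		syscon = syscon_regmap_lookup_by_phandle(np, "syscon-pllreset");
		if (IS_ERR(syscon))
			return PTR_ERR(syscon);

		/* ... and cell 1 is the CTRL_CORE_SMA_SW_0 register offset */
		ret = of_property_read_u32_index(np, "syscon-pllreset", 1, &offset);
		if (ret)
			return ret;

		return regmap_update_bits(syscon, offset, EXAMPLE_SATA_PLL_SOFT_RESET,
					  assert ? EXAMPLE_SATA_PLL_SOFT_RESET : 0);
	}
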
diff --git a/Documentation/devicetree/bindings/regulator/da9210.txt b/Documentation/devicetree/bindings/regulator/da9210.txt
index 3297c53cb915..7aa9b1fa6b21 100644
--- a/Documentation/devicetree/bindings/regulator/da9210.txt
+++ b/Documentation/devicetree/bindings/regulator/da9210.txt
@@ -5,6 +5,10 @@ Required properties:
5- compatible: must be "dlg,da9210" 5- compatible: must be "dlg,da9210"
6- reg: the i2c slave address of the regulator. It should be 0x68. 6- reg: the i2c slave address of the regulator. It should be 0x68.
7 7
8Optional properties:
9
10- interrupts: a reference to the DA9210 interrupt, if available.
11
8Any standard regulator properties can be used to configure the single da9210 12Any standard regulator properties can be used to configure the single da9210
9DCDC. 13DCDC.
10 14
diff --git a/Documentation/devicetree/bindings/regulator/da9211.txt b/Documentation/devicetree/bindings/regulator/da9211.txt
index eb618907c7de..c620493e8dbe 100644
--- a/Documentation/devicetree/bindings/regulator/da9211.txt
+++ b/Documentation/devicetree/bindings/regulator/da9211.txt
@@ -1,7 +1,7 @@
-* Dialog Semiconductor DA9211/DA9213 Voltage Regulator
+* Dialog Semiconductor DA9211/DA9213/DA9215 Voltage Regulator
 
 Required properties:
-- compatible: "dlg,da9211" or "dlg,da9213".
+- compatible: "dlg,da9211" or "dlg,da9213" or "dlg,da9215"
 - reg: I2C slave address, usually 0x68.
 - interrupts: the interrupt outputs of the controller
 - regulators: A node that houses a sub-node for each regulator within the
@@ -66,3 +66,31 @@ Example 2) DA9213
66 }; 66 };
67 }; 67 };
68 }; 68 };
69
70
71Example 3) DA9215
72 pmic: da9215@68 {
73 compatible = "dlg,da9215";
74 reg = <0x68>;
75 interrupts = <3 27>;
76
77 regulators {
78 BUCKA {
79 regulator-name = "VBUCKA";
80 regulator-min-microvolt = < 300000>;
81 regulator-max-microvolt = <1570000>;
82 regulator-min-microamp = <4000000>;
83 regulator-max-microamp = <7000000>;
84 enable-gpios = <&gpio 27 0>;
85 };
86 BUCKB {
87 regulator-name = "VBUCKB";
88 regulator-min-microvolt = < 300000>;
89 regulator-max-microvolt = <1570000>;
90 regulator-min-microamp = <4000000>;
91 regulator-max-microamp = <7000000>;
92 enable-gpios = <&gpio 17 0>;
93 };
94 };
95 };
96
diff --git a/Documentation/devicetree/bindings/regulator/max8973-regulator.txt b/Documentation/devicetree/bindings/regulator/max8973-regulator.txt
index 55efb24e5683..f80ea2fe27e6 100644
--- a/Documentation/devicetree/bindings/regulator/max8973-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/max8973-regulator.txt
@@ -25,6 +25,12 @@ Optional properties:
25-maxim,enable-frequency-shift: boolean, enable 9% frequency shift. 25-maxim,enable-frequency-shift: boolean, enable 9% frequency shift.
26-maxim,enable-bias-control: boolean, enable bias control. By enabling this 26-maxim,enable-bias-control: boolean, enable bias control. By enabling this
27 startup delay can be reduce to 20us from 220us. 27 startup delay can be reduce to 20us from 220us.
28-maxim,enable-etr: boolean, enable Enhanced Transient Response.
29-maxim,enable-high-etr-sensitivity: boolean, Enhanced transient response
30 circuit is enabled and set for high sensitivity. If this
31 property is available then etr will be enable default.
32
33Enhanced transient response (ETR) will affect the configuration of CKADV.
28 34
29Example: 35Example:
30 36
diff --git a/Documentation/devicetree/bindings/regulator/mt6311-regulator.txt b/Documentation/devicetree/bindings/regulator/mt6311-regulator.txt
new file mode 100644
index 000000000000..02649d8b3f5a
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/mt6311-regulator.txt
@@ -0,0 +1,35 @@
1Mediatek MT6311 Regulator Driver
2
3Required properties:
4- compatible: "mediatek,mt6311-regulator"
5- reg: I2C slave address, usually 0x6b.
6- regulators: List of regulators provided by this controller. It is named
7 to VDVFS and VBIASN.
8 The definition for each of these nodes is defined using the standard binding
9 for regulators at Documentation/devicetree/bindings/regulator/regulator.txt.
10
11The valid names for regulators are:
12BUCK:
13 VDVFS
14LDO:
15 VBIASN
16
17Example:
18 mt6311: pmic@6b {
19 compatible = "mediatek,mt6311-regulator";
20 reg = <0x6b>;
21
22 regulators {
23 mt6311_vcpu_reg: VDVFS {
24 regulator-name = "VDVFS";
25 regulator-min-microvolt = < 600000>;
26 regulator-max-microvolt = <1400000>;
27 regulator-ramp-delay = <10000>;
28 };
29 mt6311_ldo_reg: VBIASN {
30 regulator-name = "VBIASN";
31 regulator-min-microvolt = <200000>;
32 regulator-max-microvolt = <800000>;
33 };
34 };
35 };
diff --git a/Documentation/devicetree/bindings/regulator/pwm-regulator.txt b/Documentation/devicetree/bindings/regulator/pwm-regulator.txt
index ce91f61feb12..ed936f0f34f2 100644
--- a/Documentation/devicetree/bindings/regulator/pwm-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/pwm-regulator.txt
@@ -1,27 +1,68 @@
-pwm regulator bindings
+Bindings for the Generic PWM Regulator
+======================================
+
+Currently supports 2 modes of operation:
+
+Voltage Table:		When in this mode, a voltage table (See below) of
+			predefined voltage <=> duty-cycle values must be
+			provided via DT. Limitations are that the regulator can
+			only operate at the voltages supplied in the table.
+			Intermediary duty-cycle values which would normally
+			allow finer grained voltage selection are ignored and
+			rendered useless. Although more control is given to
+			the user if the assumptions made in continuous-voltage
+			mode do not reign true.
+
+Continuous Voltage:	This mode uses the regulator's maximum and minimum
+			supplied voltages specified in the
+			regulator-{min,max}-microvolt properties to calculate
+			appropriate duty-cycle values. This allows for a much
+			more fine grained solution when compared with
+			voltage-table mode above. This solution does make an
+			assumption that a %50 duty-cycle value will cause the
+			regulator voltage to run at half way between the
+			supplied max_uV and min_uV values.
 
 Required properties:
-- compatible: Should be "pwm-regulator"
-- pwms: OF device-tree PWM specification (see PWM binding pwm.txt)
-- voltage-table: voltage and duty table, include 2 members in each set of
-  brackets, first one is voltage(unit: uv), the next is duty(unit: percent)
+--------------------
+- compatible: Should be "pwm-regulator"
+
+- pwms: PWM specification (See: ../pwm/pwm.txt)
+
+Only required for Voltage Table Mode:
+- voltage-table:	Voltage and Duty-Cycle table consisting of 2 cells
+			First cell is voltage in microvolts (uV)
+			Second cell is duty-cycle in percent (%)
+
+NB: To be clear, if voltage-table is provided, then the device will be used
+in Voltage Table Mode. If no voltage-table is provided, then the device will
+be used in Continuous Voltage Mode.
 
-Any property defined as part of the core regulator binding defined in
-regulator.txt can also be used.
+Any property defined as part of the core regulator binding can also be used.
+(See: ../regulator/regulator.txt)
 
-Example:
+Continuous Voltage Example:
 	pwm_regulator {
 		compatible = "pwm-regulator;
 		pwms = <&pwm1 0 8448 0>;
+		regulator-min-microvolt = <1016000>;
+		regulator-max-microvolt = <1114000>;
+		regulator-name = "vdd_logic";
+	};
 
+Voltage Table Example:
+	pwm_regulator {
+		compatible = "pwm-regulator;
+		pwms = <&pwm1 0 8448 0>;
+		regulator-min-microvolt = <1016000>;
+		regulator-max-microvolt = <1114000>;
+		regulator-name = "vdd_logic";
+
+		/* Voltage Duty-Cycle */
 		voltage-table = <1114000 0>,
 			<1095000 10>,
 			<1076000 20>,
 			<1056000 30>,
 			<1036000 40>,
 			<1016000 50>;
-
-		regulator-min-microvolt = <1016000>;
-		regulator-max-microvolt = <1114000>;
-		regulator-name = "vdd_logic";
 	};
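
The continuous-voltage mode described above reduces to a linear mapping between duty-cycle and the regulator-{min,max}-microvolt window, with 50% landing half way between the two limits. A minimal sketch of that arithmetic (illustrative only; the real driver handles polarity, clamping and the PWM period separately), which with the limits from the examples above maps 1065000 uV to 50%:

	#include <linux/kernel.h>

	/* Assumes 0% duty corresponds to regulator-min-microvolt and 100% to
	 * regulator-max-microvolt; a board may wire the PWM the other way. */
	static unsigned int example_uV_to_duty_pct(int req_uV, int min_uV, int max_uV)
	{
		if (req_uV <= min_uV)
			return 0;
		if (req_uV >= max_uV)
			return 100;
		return DIV_ROUND_CLOSEST((req_uV - min_uV) * 100, max_uV - min_uV);
	}
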
diff --git a/Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.txt b/Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.txt
index 75b4604bad07..d00bfd8624a5 100644
--- a/Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.txt
@@ -91,13 +91,65 @@ see regulator.txt - with additional custom properties described below:
91- regulator-initial-mode: 91- regulator-initial-mode:
92 Usage: optional 92 Usage: optional
93 Value type: <u32> 93 Value type: <u32>
94 Descrption: 1 = Set initial mode to high power mode (HPM), also referred 94 Description: 2 = Set initial mode to auto mode (automatically select
95 to as NPM. HPM consumes more ground current than LPM, but 95 between HPM and LPM); not available on boost type
96 regulators.
97
98 1 = Set initial mode to high power mode (HPM), also referred
99 to as NPM. HPM consumes more ground current than LPM, but
96 it can source significantly higher load current. HPM is not 100 it can source significantly higher load current. HPM is not
97 available on boost type regulators. For voltage switch type 101 available on boost type regulators. For voltage switch type
98 regulators, HPM implies that over current protection and 102 regulators, HPM implies that over current protection and
99 soft start are active all the time. 0 = Set initial mode to 103 soft start are active all the time.
100 low power mode (LPM). 104
105 0 = Set initial mode to low power mode (LPM).
106
107- qcom,ocp-max-retries:
108 Usage: optional
109 Value type: <u32>
110 Description: Maximum number of times to try toggling a voltage switch
111 off and back on as a result of consecutive over current
112 events.
113
114- qcom,ocp-retry-delay:
115 Usage: optional
116 Value type: <u32>
117 Description: Time to delay in milliseconds between each voltage switch
118 toggle after an over current event takes place.
119
120- qcom,pin-ctrl-enable:
121 Usage: optional
122 Value type: <u32>
123 Description: Bit mask specifying which hardware pins should be used to
124 enable the regulator, if any; supported bits are:
125 0 = ignore all hardware enable signals
126 BIT(0) = follow HW0_EN signal
127 BIT(1) = follow HW1_EN signal
128 BIT(2) = follow HW2_EN signal
129 BIT(3) = follow HW3_EN signal
130
131- qcom,pin-ctrl-hpm:
132 Usage: optional
133 Value type: <u32>
134 Description: Bit mask specifying which hardware pins should be used to
135 force the regulator into high power mode, if any;
136 supported bits are:
137 0 = ignore all hardware enable signals
138 BIT(0) = follow HW0_EN signal
139 BIT(1) = follow HW1_EN signal
140 BIT(2) = follow HW2_EN signal
141 BIT(3) = follow HW3_EN signal
142 BIT(4) = follow PMIC awake state
143
144- qcom,vs-soft-start-strength:
145 Usage: optional
146 Value type: <u32>
147 Description: This property sets the soft start strength for voltage
148 switch type regulators; supported values are:
149 0 = 0.05 uA
150 1 = 0.25 uA
151 2 = 0.55 uA
152 3 = 0.75 uA
101 153
102Example: 154Example:
103 155
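
The qcom,pin-ctrl-enable and qcom,pin-ctrl-hpm properties documented above are plain u32 bit masks, so a consumer reads them with of_property_read_u32() and tests the documented BIT(n) positions. A rough sketch (the helper and messages are made up for illustration):

	#include <linux/bitops.h>
	#include <linux/of.h>
	#include <linux/printk.h>

	static void example_show_pin_ctrl(struct device_node *np)
	{
		u32 mask;
		int i;

		/* Property absent: ignore all hardware enable signals. */
		if (of_property_read_u32(np, "qcom,pin-ctrl-enable", &mask))
			return;

		for (i = 0; i < 4; i++)
			if (mask & BIT(i))
				pr_info("regulator follows HW%d_EN\n", i);
	}
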
diff --git a/Documentation/devicetree/bindings/regulator/regulator.txt b/Documentation/devicetree/bindings/regulator/regulator.txt
index db88feb28c03..24bd422cecd5 100644
--- a/Documentation/devicetree/bindings/regulator/regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/regulator.txt
@@ -42,6 +42,7 @@ Optional properties:
42- regulator-system-load: Load in uA present on regulator that is not captured by 42- regulator-system-load: Load in uA present on regulator that is not captured by
43 any consumer request. 43 any consumer request.
44- regulator-pull-down: Enable pull down resistor when the regulator is disabled. 44- regulator-pull-down: Enable pull down resistor when the regulator is disabled.
45- regulator-over-current-protection: Enable over current protection.
45 46
46Deprecated properties: 47Deprecated properties:
47- regulator-compatible: If a regulator chip contains multiple 48- regulator-compatible: If a regulator chip contains multiple
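
The new regulator-over-current-protection property is a simple boolean; a driver would typically read it with of_property_read_bool() while building its constraints. A sketch under that assumption (the constraints structure here is an example, not the regulator core's own type):

	#include <linux/of.h>

	struct example_constraints {
		bool over_current_protection;	/* example field for this sketch */
	};

	static void example_parse_ocp(struct device_node *np,
				      struct example_constraints *c)
	{
		c->over_current_protection =
			of_property_read_bool(np, "regulator-over-current-protection");
	}
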
diff --git a/Documentation/devicetree/bindings/sound/mt8173-max98090.txt b/Documentation/devicetree/bindings/sound/mt8173-max98090.txt
index 829bd26d17f8..519e97c8f1b8 100644
--- a/Documentation/devicetree/bindings/sound/mt8173-max98090.txt
+++ b/Documentation/devicetree/bindings/sound/mt8173-max98090.txt
@@ -3,11 +3,13 @@ MT8173 with MAX98090 CODEC
3Required properties: 3Required properties:
4- compatible : "mediatek,mt8173-max98090" 4- compatible : "mediatek,mt8173-max98090"
5- mediatek,audio-codec: the phandle of the MAX98090 audio codec 5- mediatek,audio-codec: the phandle of the MAX98090 audio codec
6- mediatek,platform: the phandle of MT8173 ASoC platform
6 7
7Example: 8Example:
8 9
9 sound { 10 sound {
10 compatible = "mediatek,mt8173-max98090"; 11 compatible = "mediatek,mt8173-max98090";
11 mediatek,audio-codec = <&max98090>; 12 mediatek,audio-codec = <&max98090>;
13 mediatek,platform = <&afe>;
12 }; 14 };
13 15
diff --git a/Documentation/devicetree/bindings/sound/mt8173-rt5650-rt5676.txt b/Documentation/devicetree/bindings/sound/mt8173-rt5650-rt5676.txt
index 61e98c976bd4..f205ce9e31dd 100644
--- a/Documentation/devicetree/bindings/sound/mt8173-rt5650-rt5676.txt
+++ b/Documentation/devicetree/bindings/sound/mt8173-rt5650-rt5676.txt
@@ -3,11 +3,13 @@ MT8173 with RT5650 RT5676 CODECS
3Required properties: 3Required properties:
4- compatible : "mediatek,mt8173-rt5650-rt5676" 4- compatible : "mediatek,mt8173-rt5650-rt5676"
5- mediatek,audio-codec: the phandles of rt5650 and rt5676 codecs 5- mediatek,audio-codec: the phandles of rt5650 and rt5676 codecs
6- mediatek,platform: the phandle of MT8173 ASoC platform
6 7
7Example: 8Example:
8 9
9 sound { 10 sound {
10 compatible = "mediatek,mt8173-rt5650-rt5676"; 11 compatible = "mediatek,mt8173-rt5650-rt5676";
11 mediatek,audio-codec = <&rt5650 &rt5676>; 12 mediatek,audio-codec = <&rt5650 &rt5676>;
13 mediatek,platform = <&afe>;
12 }; 14 };
13 15
diff --git a/Documentation/devicetree/bindings/spi/spi-ath79.txt b/Documentation/devicetree/bindings/spi/spi-ath79.txt
index f1ad9c367532..9c696fa66f81 100644
--- a/Documentation/devicetree/bindings/spi/spi-ath79.txt
+++ b/Documentation/devicetree/bindings/spi/spi-ath79.txt
@@ -3,7 +3,7 @@ Binding for Qualcomm Atheros AR7xxx/AR9xxx SPI controller
 Required properties:
 - compatible: has to be "qca,<soc-type>-spi", "qca,ar7100-spi" as fallback.
 - reg: Base address and size of the controllers memory area
-- clocks: phandle to the AHB clock.
+- clocks: phandle of the AHB clock.
 - clock-names: has to be "ahb".
 - #address-cells: <1>, as required by generic SPI binding.
 - #size-cells: <0>, also as required by generic SPI binding.
@@ -12,9 +12,9 @@ Child nodes as per the generic SPI binding.
 
 Example:
 
-	spi@1F000000 {
+	spi@1f000000 {
 		compatible = "qca,ar9132-spi", "qca,ar7100-spi";
-		reg = <0x1F000000 0x10>;
+		reg = <0x1f000000 0x10>;
 
 		clocks = <&pll 2>;
 		clock-names = "ahb";
diff --git a/Documentation/hwmon/nct7904 b/Documentation/hwmon/nct7904
index 014f112e2a14..57fffe33ebfc 100644
--- a/Documentation/hwmon/nct7904
+++ b/Documentation/hwmon/nct7904
@@ -35,11 +35,11 @@ temp1_input Local temperature (1/1000 degree,
 temp[2-9]_input		CPU temperatures (1/1000 degree,
 			0.125 degree resolution)
 
-fan[1-4]_mode		R/W, 0/1 for manual or SmartFan mode
+pwm[1-4]_enable		R/W, 1/2 for manual or SmartFan mode
 			Setting SmartFan mode is supported only if it has been
 			previously configured by BIOS (or configuration EEPROM)
 
-fan[1-4]_pwm		R/O in SmartFan mode, R/W in manual control mode
+pwm[1-4]		R/O in SmartFan mode, R/W in manual control mode
 
 The driver checks sensor control registers and does not export the sensors
 that are not enabled. Anyway, a sensor that is enabled may actually be not
diff --git a/Documentation/input/alps.txt b/Documentation/input/alps.txt
index c86f2f1ae4f6..1fec1135791d 100644
--- a/Documentation/input/alps.txt
+++ b/Documentation/input/alps.txt
@@ -119,8 +119,10 @@ ALPS Absolute Mode - Protocol Version 2
  byte 5:    0   z6   z5   z4   z3   z2   z1   z0
 
 Protocol Version 2 DualPoint devices send standard PS/2 mouse packets for
-the DualPoint Stick. For non interleaved dualpoint devices the pointingstick
-buttons get reported separately in the PSM, PSR and PSL bits.
+the DualPoint Stick. The M, R and L bits signal the combined status of both
+the pointingstick and touchpad buttons, except for Dell dualpoint devices
+where the pointingstick buttons get reported separately in the PSM, PSR
+and PSL bits.
 
 Dualpoint device -- interleaved packet format
 ---------------------------------------------
diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
index e63b446d973c..13f888a02a3d 100644
--- a/Documentation/kbuild/makefiles.txt
+++ b/Documentation/kbuild/makefiles.txt
@@ -952,6 +952,14 @@ When kbuild executes, the following steps are followed (roughly):
     $(KBUILD_ARFLAGS) set by the top level Makefile to "D" (deterministic
     mode) if this option is supported by $(AR).
 
+    ARCH_CPPFLAGS, ARCH_AFLAGS, ARCH_CFLAGS	Overrides the kbuild defaults
+
+	These variables are appended to the KBUILD_CPPFLAGS,
+	KBUILD_AFLAGS, and KBUILD_CFLAGS, respectively, after the
+	top-level Makefile has set any other flags. This provides a
+	means for an architecture to override the defaults.
+
+
 --- 6.2 Add prerequisites to archheaders:
 
     The archheaders: rule is used to generate header files that
diff --git a/Documentation/target/tcm_mod_builder.py b/Documentation/target/tcm_mod_builder.py
index 949de191fcdc..cda56df9b8a7 100755
--- a/Documentation/target/tcm_mod_builder.py
+++ b/Documentation/target/tcm_mod_builder.py
@@ -199,7 +199,8 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
 	buf += "#include <linux/string.h>\n"
 	buf += "#include <linux/configfs.h>\n"
 	buf += "#include <linux/ctype.h>\n"
-	buf += "#include <asm/unaligned.h>\n\n"
+	buf += "#include <asm/unaligned.h>\n"
+	buf += "#include <scsi/scsi_proto.h>\n\n"
 	buf += "#include <target/target_core_base.h>\n"
 	buf += "#include <target/target_core_fabric.h>\n"
 	buf += "#include <target/target_core_fabric_configfs.h>\n"
@@ -230,8 +231,14 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
 	buf += " }\n"
 	buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
 	buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
-	buf += " ret = core_tpg_register(&" + fabric_mod_name + "_ops, wwn,\n"
-	buf += " &tpg->se_tpg, SCSI_PROTOCOL_SAS);\n"
+
+	if proto_ident == "FC":
+		buf += " ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_FCP);\n"
+	elif proto_ident == "SAS":
+		buf += " ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_SAS);\n"
+	elif proto_ident == "iSCSI":
+		buf += " ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_ISCSI);\n"
+
 	buf += " if (ret < 0) {\n"
 	buf += " kfree(tpg);\n"
 	buf += " return NULL;\n"
@@ -292,7 +299,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
 
 	buf += "static const struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
 	buf += " .module = THIS_MODULE,\n"
-	buf += " .name = " + fabric_mod_name + ",\n"
+	buf += " .name = \"" + fabric_mod_name + "\",\n"
 	buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
 	buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
 	buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
@@ -322,17 +329,17 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
 	buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
 	buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
 	buf += "\n"
-	buf += " .tfc_wwn_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
+	buf += " .tfc_wwn_attrs = " + fabric_mod_name + "_wwn_attrs,\n"
 	buf += "};\n\n"
 
 	buf += "static int __init " + fabric_mod_name + "_init(void)\n"
 	buf += "{\n"
-	buf += " return target_register_template(" + fabric_mod_name + "_ops);\n"
+	buf += " return target_register_template(&" + fabric_mod_name + "_ops);\n"
 	buf += "};\n\n"
 
 	buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
 	buf += "{\n"
-	buf += " target_unregister_template(" + fabric_mod_name + "_ops);\n"
+	buf += " target_unregister_template(&" + fabric_mod_name + "_ops);\n"
 	buf += "};\n\n"
 
 	buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
diff --git a/MAINTAINERS b/MAINTAINERS
index fd6078443083..569568f6644f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -361,11 +361,11 @@ S: Supported
361F: drivers/input/touchscreen/ad7879.c 361F: drivers/input/touchscreen/ad7879.c
362 362
363ADDRESS SPACE LAYOUT RANDOMIZATION (ASLR) 363ADDRESS SPACE LAYOUT RANDOMIZATION (ASLR)
364M: Jiri Kosina <jkosina@suse.cz> 364M: Jiri Kosina <jkosina@suse.com>
365S: Maintained 365S: Maintained
366 366
367ADM1025 HARDWARE MONITOR DRIVER 367ADM1025 HARDWARE MONITOR DRIVER
368M: Jean Delvare <jdelvare@suse.de> 368M: Jean Delvare <jdelvare@suse.com>
369L: lm-sensors@lm-sensors.org 369L: lm-sensors@lm-sensors.org
370S: Maintained 370S: Maintained
371F: Documentation/hwmon/adm1025 371F: Documentation/hwmon/adm1025
@@ -430,7 +430,7 @@ S: Maintained
430F: drivers/macintosh/therm_adt746x.c 430F: drivers/macintosh/therm_adt746x.c
431 431
432ADT7475 HARDWARE MONITOR DRIVER 432ADT7475 HARDWARE MONITOR DRIVER
433M: Jean Delvare <jdelvare@suse.de> 433M: Jean Delvare <jdelvare@suse.com>
434L: lm-sensors@lm-sensors.org 434L: lm-sensors@lm-sensors.org
435S: Maintained 435S: Maintained
436F: Documentation/hwmon/adt7475 436F: Documentation/hwmon/adt7475
@@ -445,7 +445,7 @@ F: drivers/input/misc/adxl34x.c
445 445
446ADVANSYS SCSI DRIVER 446ADVANSYS SCSI DRIVER
447M: Matthew Wilcox <matthew@wil.cx> 447M: Matthew Wilcox <matthew@wil.cx>
448M: Hannes Reinecke <hare@suse.de> 448M: Hannes Reinecke <hare@suse.com>
449L: linux-scsi@vger.kernel.org 449L: linux-scsi@vger.kernel.org
450S: Maintained 450S: Maintained
451F: Documentation/scsi/advansys.txt 451F: Documentation/scsi/advansys.txt
@@ -506,7 +506,7 @@ F: drivers/scsi/aha152x*
506F: drivers/scsi/pcmcia/aha152x* 506F: drivers/scsi/pcmcia/aha152x*
507 507
508AIC7XXX / AIC79XX SCSI DRIVER 508AIC7XXX / AIC79XX SCSI DRIVER
509M: Hannes Reinecke <hare@suse.de> 509M: Hannes Reinecke <hare@suse.com>
510L: linux-scsi@vger.kernel.org 510L: linux-scsi@vger.kernel.org
511S: Maintained 511S: Maintained
512F: drivers/scsi/aic7xxx/ 512F: drivers/scsi/aic7xxx/
@@ -746,7 +746,7 @@ S: Maintained
746F: sound/aoa/ 746F: sound/aoa/
747 747
748APM DRIVER 748APM DRIVER
749M: Jiri Kosina <jkosina@suse.cz> 749M: Jiri Kosina <jkosina@suse.com>
750S: Odd fixes 750S: Odd fixes
751F: arch/x86/kernel/apm_32.c 751F: arch/x86/kernel/apm_32.c
752F: include/linux/apm_bios.h 752F: include/linux/apm_bios.h
@@ -1001,6 +1001,7 @@ ARM/CONEXANT DIGICOLOR MACHINE SUPPORT
1001M: Baruch Siach <baruch@tkos.co.il> 1001M: Baruch Siach <baruch@tkos.co.il>
1002L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1002L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
1003S: Maintained 1003S: Maintained
1004F: arch/arm/boot/dts/cx92755*
1004N: digicolor 1005N: digicolor
1005 1006
1006ARM/EBSA110 MACHINE SUPPORT 1007ARM/EBSA110 MACHINE SUPPORT
@@ -1324,7 +1325,7 @@ F: arch/arm/mach-pxa/include/mach/palmtc.h
1324F: arch/arm/mach-pxa/palmtc.c 1325F: arch/arm/mach-pxa/palmtc.c
1325 1326
1326ARM/PALM TREO SUPPORT 1327ARM/PALM TREO SUPPORT
1327M: Tomas Cech <sleep_walker@suse.cz> 1328M: Tomas Cech <sleep_walker@suse.com>
1328L: linux-arm-kernel@lists.infradead.org 1329L: linux-arm-kernel@lists.infradead.org
1329W: http://hackndev.com 1330W: http://hackndev.com
1330S: Maintained 1331S: Maintained
@@ -2405,7 +2406,7 @@ F: drivers/gpio/gpio-bt8xx.c
2405BTRFS FILE SYSTEM 2406BTRFS FILE SYSTEM
2406M: Chris Mason <clm@fb.com> 2407M: Chris Mason <clm@fb.com>
2407M: Josef Bacik <jbacik@fb.com> 2408M: Josef Bacik <jbacik@fb.com>
2408M: David Sterba <dsterba@suse.cz> 2409M: David Sterba <dsterba@suse.com>
2409L: linux-btrfs@vger.kernel.org 2410L: linux-btrfs@vger.kernel.org
2410W: http://btrfs.wiki.kernel.org/ 2411W: http://btrfs.wiki.kernel.org/
2411Q: http://patchwork.kernel.org/project/linux-btrfs/list/ 2412Q: http://patchwork.kernel.org/project/linux-btrfs/list/
@@ -2748,7 +2749,7 @@ COCCINELLE/Semantic Patches (SmPL)
2748M: Julia Lawall <Julia.Lawall@lip6.fr> 2749M: Julia Lawall <Julia.Lawall@lip6.fr>
2749M: Gilles Muller <Gilles.Muller@lip6.fr> 2750M: Gilles Muller <Gilles.Muller@lip6.fr>
2750M: Nicolas Palix <nicolas.palix@imag.fr> 2751M: Nicolas Palix <nicolas.palix@imag.fr>
2751M: Michal Marek <mmarek@suse.cz> 2752M: Michal Marek <mmarek@suse.com>
2752L: cocci@systeme.lip6.fr (moderated for non-subscribers) 2753L: cocci@systeme.lip6.fr (moderated for non-subscribers)
2753T: git git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild.git misc 2754T: git git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild.git misc
2754W: http://coccinelle.lip6.fr/ 2755W: http://coccinelle.lip6.fr/
@@ -2864,7 +2865,7 @@ F: kernel/cpuset.c
2864 2865
2865CONTROL GROUP - MEMORY RESOURCE CONTROLLER (MEMCG) 2866CONTROL GROUP - MEMORY RESOURCE CONTROLLER (MEMCG)
2866M: Johannes Weiner <hannes@cmpxchg.org> 2867M: Johannes Weiner <hannes@cmpxchg.org>
2867M: Michal Hocko <mhocko@suse.cz> 2868M: Michal Hocko <mhocko@kernel.org>
2868L: cgroups@vger.kernel.org 2869L: cgroups@vger.kernel.org
2869L: linux-mm@kvack.org 2870L: linux-mm@kvack.org
2870S: Maintained 2871S: Maintained
@@ -2945,7 +2946,7 @@ F: arch/x86/kernel/cpuid.c
2945F: arch/x86/kernel/msr.c 2946F: arch/x86/kernel/msr.c
2946 2947
2947CPU POWER MONITORING SUBSYSTEM 2948CPU POWER MONITORING SUBSYSTEM
2948M: Thomas Renninger <trenn@suse.de> 2949M: Thomas Renninger <trenn@suse.com>
2949L: linux-pm@vger.kernel.org 2950L: linux-pm@vger.kernel.org
2950S: Maintained 2951S: Maintained
2951F: tools/power/cpupower/ 2952F: tools/power/cpupower/
@@ -3175,7 +3176,7 @@ F: Documentation/networking/dmfe.txt
3175F: drivers/net/ethernet/dec/tulip/dmfe.c 3176F: drivers/net/ethernet/dec/tulip/dmfe.c
3176 3177
3177DC390/AM53C974 SCSI driver 3178DC390/AM53C974 SCSI driver
3178M: Hannes Reinecke <hare@suse.de> 3179M: Hannes Reinecke <hare@suse.com>
3179L: linux-scsi@vger.kernel.org 3180L: linux-scsi@vger.kernel.org
3180S: Maintained 3181S: Maintained
3181F: drivers/scsi/am53c974.c 3182F: drivers/scsi/am53c974.c
@@ -3379,7 +3380,7 @@ W: http://www.win.tue.nl/~aeb/partitions/partition_types-1.html
3379S: Maintained 3380S: Maintained
3380 3381
3381DISKQUOTA 3382DISKQUOTA
3382M: Jan Kara <jack@suse.cz> 3383M: Jan Kara <jack@suse.com>
3383S: Maintained 3384S: Maintained
3384F: Documentation/filesystems/quota.txt 3385F: Documentation/filesystems/quota.txt
3385F: fs/quota/ 3386F: fs/quota/
@@ -3435,7 +3436,7 @@ F: Documentation/hwmon/dme1737
3435F: drivers/hwmon/dme1737.c 3436F: drivers/hwmon/dme1737.c
3436 3437
3437DMI/SMBIOS SUPPORT 3438DMI/SMBIOS SUPPORT
3438M: Jean Delvare <jdelvare@suse.de> 3439M: Jean Delvare <jdelvare@suse.com>
3439S: Maintained 3440S: Maintained
3440T: quilt http://jdelvare.nerim.net/devel/linux/jdelvare-dmi/ 3441T: quilt http://jdelvare.nerim.net/devel/linux/jdelvare-dmi/
3441F: Documentation/ABI/testing/sysfs-firmware-dmi-tables 3442F: Documentation/ABI/testing/sysfs-firmware-dmi-tables
@@ -3586,6 +3587,15 @@ S: Maintained
3586F: drivers/gpu/drm/rockchip/ 3587F: drivers/gpu/drm/rockchip/
3587F: Documentation/devicetree/bindings/video/rockchip* 3588F: Documentation/devicetree/bindings/video/rockchip*
3588 3589
3590DRM DRIVERS FOR STI
3591M: Benjamin Gaignard <benjamin.gaignard@linaro.org>
3592M: Vincent Abriou <vincent.abriou@st.com>
3593L: dri-devel@lists.freedesktop.org
3594T: git http://git.linaro.org/people/benjamin.gaignard/kernel.git
3595S: Maintained
3596F: drivers/gpu/drm/sti
3597F: Documentation/devicetree/bindings/gpu/st,stih4xx.txt
3598
3589DSBR100 USB FM RADIO DRIVER 3599DSBR100 USB FM RADIO DRIVER
3590M: Alexey Klimov <klimov.linux@gmail.com> 3600M: Alexey Klimov <klimov.linux@gmail.com>
3591L: linux-media@vger.kernel.org 3601L: linux-media@vger.kernel.org
@@ -4051,7 +4061,7 @@ F: drivers/of/of_mdio.c
4051F: drivers/of/of_net.c 4061F: drivers/of/of_net.c
4052 4062
4053EXT2 FILE SYSTEM 4063EXT2 FILE SYSTEM
4054M: Jan Kara <jack@suse.cz> 4064M: Jan Kara <jack@suse.com>
4055L: linux-ext4@vger.kernel.org 4065L: linux-ext4@vger.kernel.org
4056S: Maintained 4066S: Maintained
4057F: Documentation/filesystems/ext2.txt 4067F: Documentation/filesystems/ext2.txt
@@ -4059,7 +4069,7 @@ F: fs/ext2/
4059F: include/linux/ext2* 4069F: include/linux/ext2*
4060 4070
4061EXT3 FILE SYSTEM 4071EXT3 FILE SYSTEM
4062M: Jan Kara <jack@suse.cz> 4072M: Jan Kara <jack@suse.com>
4063M: Andrew Morton <akpm@linux-foundation.org> 4073M: Andrew Morton <akpm@linux-foundation.org>
4064M: Andreas Dilger <adilger.kernel@dilger.ca> 4074M: Andreas Dilger <adilger.kernel@dilger.ca>
4065L: linux-ext4@vger.kernel.org 4075L: linux-ext4@vger.kernel.org
@@ -4109,7 +4119,7 @@ F: drivers/video/fbdev/exynos/exynos_mipi*
4109F: include/video/exynos_mipi* 4119F: include/video/exynos_mipi*
4110 4120
4111F71805F HARDWARE MONITORING DRIVER 4121F71805F HARDWARE MONITORING DRIVER
4112M: Jean Delvare <jdelvare@suse.de> 4122M: Jean Delvare <jdelvare@suse.com>
4113L: lm-sensors@lm-sensors.org 4123L: lm-sensors@lm-sensors.org
4114S: Maintained 4124S: Maintained
4115F: Documentation/hwmon/f71805f 4125F: Documentation/hwmon/f71805f
@@ -4244,7 +4254,7 @@ S: Maintained
4244F: drivers/block/rsxx/ 4254F: drivers/block/rsxx/
4245 4255
4246FLOPPY DRIVER 4256FLOPPY DRIVER
4247M: Jiri Kosina <jkosina@suse.cz> 4257M: Jiri Kosina <jkosina@suse.com>
4248T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/floppy.git 4258T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/floppy.git
4249S: Odd fixes 4259S: Odd fixes
4250F: drivers/block/floppy.c 4260F: drivers/block/floppy.c
@@ -4665,7 +4675,7 @@ F: drivers/media/usb/stk1160/
4665 4675
4666H8/300 ARCHITECTURE 4676H8/300 ARCHITECTURE
4667M: Yoshinori Sato <ysato@users.sourceforge.jp> 4677M: Yoshinori Sato <ysato@users.sourceforge.jp>
4668L: uclinux-h8-devel@lists.sourceforge.jp 4678L: uclinux-h8-devel@lists.sourceforge.jp (moderated for non-subscribers)
4669W: http://uclinux-h8.sourceforge.jp 4679W: http://uclinux-h8.sourceforge.jp
4670T: git git://git.sourceforge.jp/gitroot/uclinux-h8/linux.git 4680T: git git://git.sourceforge.jp/gitroot/uclinux-h8/linux.git
4671S: Maintained 4681S: Maintained
@@ -4712,7 +4722,7 @@ S: Maintained
4712F: drivers/media/usb/hackrf/ 4722F: drivers/media/usb/hackrf/
4713 4723
4714HARDWARE MONITORING 4724HARDWARE MONITORING
4715M: Jean Delvare <jdelvare@suse.de> 4725M: Jean Delvare <jdelvare@suse.com>
4716M: Guenter Roeck <linux@roeck-us.net> 4726M: Guenter Roeck <linux@roeck-us.net>
4717L: lm-sensors@lm-sensors.org 4727L: lm-sensors@lm-sensors.org
4718W: http://www.lm-sensors.org/ 4728W: http://www.lm-sensors.org/
@@ -4815,7 +4825,7 @@ F: include/linux/pm.h
4815F: arch/*/include/asm/suspend*.h 4825F: arch/*/include/asm/suspend*.h
4816 4826
4817HID CORE LAYER 4827HID CORE LAYER
4818M: Jiri Kosina <jkosina@suse.cz> 4828M: Jiri Kosina <jkosina@suse.com>
4819L: linux-input@vger.kernel.org 4829L: linux-input@vger.kernel.org
4820T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/hid.git 4830T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/hid.git
4821S: Maintained 4831S: Maintained
@@ -4824,7 +4834,7 @@ F: include/linux/hid*
4824F: include/uapi/linux/hid* 4834F: include/uapi/linux/hid*
4825 4835
4826HID SENSOR HUB DRIVERS 4836HID SENSOR HUB DRIVERS
4827M: Jiri Kosina <jkosina@suse.cz> 4837M: Jiri Kosina <jkosina@suse.com>
4828M: Jonathan Cameron <jic23@kernel.org> 4838M: Jonathan Cameron <jic23@kernel.org>
4829M: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com> 4839M: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
4830L: linux-input@vger.kernel.org 4840L: linux-input@vger.kernel.org
@@ -4958,7 +4968,7 @@ F: include/linux/hyperv.h
4958F: tools/hv/ 4968F: tools/hv/
4959 4969
4960I2C OVER PARALLEL PORT 4970I2C OVER PARALLEL PORT
4961M: Jean Delvare <jdelvare@suse.de> 4971M: Jean Delvare <jdelvare@suse.com>
4962L: linux-i2c@vger.kernel.org 4972L: linux-i2c@vger.kernel.org
4963S: Maintained 4973S: Maintained
4964F: Documentation/i2c/busses/i2c-parport 4974F: Documentation/i2c/busses/i2c-parport
@@ -4967,7 +4977,7 @@ F: drivers/i2c/busses/i2c-parport.c
4967F: drivers/i2c/busses/i2c-parport-light.c 4977F: drivers/i2c/busses/i2c-parport-light.c
4968 4978
4969I2C/SMBUS CONTROLLER DRIVERS FOR PC 4979I2C/SMBUS CONTROLLER DRIVERS FOR PC
4970M: Jean Delvare <jdelvare@suse.de> 4980M: Jean Delvare <jdelvare@suse.com>
4971L: linux-i2c@vger.kernel.org 4981L: linux-i2c@vger.kernel.org
4972S: Maintained 4982S: Maintained
4973F: Documentation/i2c/busses/i2c-ali1535 4983F: Documentation/i2c/busses/i2c-ali1535
@@ -5008,7 +5018,7 @@ F: drivers/i2c/busses/i2c-ismt.c
5008F: Documentation/i2c/busses/i2c-ismt 5018F: Documentation/i2c/busses/i2c-ismt
5009 5019
5010I2C/SMBUS STUB DRIVER 5020I2C/SMBUS STUB DRIVER
5011M: Jean Delvare <jdelvare@suse.de> 5021M: Jean Delvare <jdelvare@suse.com>
5012L: linux-i2c@vger.kernel.org 5022L: linux-i2c@vger.kernel.org
5013S: Maintained 5023S: Maintained
5014F: drivers/i2c/i2c-stub.c 5024F: drivers/i2c/i2c-stub.c
@@ -5035,7 +5045,7 @@ L: linux-acpi@vger.kernel.org
5035S: Maintained 5045S: Maintained
5036 5046
5037I2C-TAOS-EVM DRIVER 5047I2C-TAOS-EVM DRIVER
5038M: Jean Delvare <jdelvare@suse.de> 5048M: Jean Delvare <jdelvare@suse.com>
5039L: linux-i2c@vger.kernel.org 5049L: linux-i2c@vger.kernel.org
5040S: Maintained 5050S: Maintained
5041F: Documentation/i2c/busses/i2c-taos-evm 5051F: Documentation/i2c/busses/i2c-taos-evm
@@ -5564,8 +5574,8 @@ F: include/uapi/linux/ip_vs.h
5564F: net/netfilter/ipvs/ 5574F: net/netfilter/ipvs/
5565 5575
5566IPWIRELESS DRIVER 5576IPWIRELESS DRIVER
5567M: Jiri Kosina <jkosina@suse.cz> 5577M: Jiri Kosina <jkosina@suse.com>
5568M: David Sterba <dsterba@suse.cz> 5578M: David Sterba <dsterba@suse.com>
5569S: Odd Fixes 5579S: Odd Fixes
5570F: drivers/tty/ipwireless/ 5580F: drivers/tty/ipwireless/
5571 5581
@@ -5599,6 +5609,7 @@ F: kernel/irq/
5599IRQCHIP DRIVERS 5609IRQCHIP DRIVERS
5600M: Thomas Gleixner <tglx@linutronix.de> 5610M: Thomas Gleixner <tglx@linutronix.de>
5601M: Jason Cooper <jason@lakedaemon.net> 5611M: Jason Cooper <jason@lakedaemon.net>
5612M: Marc Zyngier <marc.zyngier@arm.com>
5602L: linux-kernel@vger.kernel.org 5613L: linux-kernel@vger.kernel.org
5603S: Maintained 5614S: Maintained
5604T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core 5615T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
@@ -5607,11 +5618,14 @@ F: Documentation/devicetree/bindings/interrupt-controller/
5607F: drivers/irqchip/ 5618F: drivers/irqchip/
5608 5619
5609IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY) 5620IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY)
5610M: Benjamin Herrenschmidt <benh@kernel.crashing.org> 5621M: Jiang Liu <jiang.liu@linux.intel.com>
5622M: Marc Zyngier <marc.zyngier@arm.com>
5611S: Maintained 5623S: Maintained
5624T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
5612F: Documentation/IRQ-domain.txt 5625F: Documentation/IRQ-domain.txt
5613F: include/linux/irqdomain.h 5626F: include/linux/irqdomain.h
5614F: kernel/irq/irqdomain.c 5627F: kernel/irq/irqdomain.c
5628F: kernel/irq/msi.c
5615 5629
5616ISAPNP 5630ISAPNP
5617M: Jaroslav Kysela <perex@perex.cz> 5631M: Jaroslav Kysela <perex@perex.cz>
@@ -5685,7 +5699,7 @@ S: Maintained
5685F: drivers/isdn/hardware/eicon/ 5699F: drivers/isdn/hardware/eicon/
5686 5700
5687IT87 HARDWARE MONITORING DRIVER 5701IT87 HARDWARE MONITORING DRIVER
5688M: Jean Delvare <jdelvare@suse.de> 5702M: Jean Delvare <jdelvare@suse.com>
5689L: lm-sensors@lm-sensors.org 5703L: lm-sensors@lm-sensors.org
5690S: Maintained 5704S: Maintained
5691F: Documentation/hwmon/it87 5705F: Documentation/hwmon/it87
@@ -5752,7 +5766,7 @@ F: include/uapi/linux/jffs2.h
5752 5766
5753JOURNALLING LAYER FOR BLOCK DEVICES (JBD) 5767JOURNALLING LAYER FOR BLOCK DEVICES (JBD)
5754M: Andrew Morton <akpm@linux-foundation.org> 5768M: Andrew Morton <akpm@linux-foundation.org>
5755M: Jan Kara <jack@suse.cz> 5769M: Jan Kara <jack@suse.com>
5756L: linux-ext4@vger.kernel.org 5770L: linux-ext4@vger.kernel.org
5757S: Maintained 5771S: Maintained
5758F: fs/jbd/ 5772F: fs/jbd/
@@ -5816,7 +5830,7 @@ S: Maintained
5816F: fs/autofs4/ 5830F: fs/autofs4/
5817 5831
5818KERNEL BUILD + files below scripts/ (unless maintained elsewhere) 5832KERNEL BUILD + files below scripts/ (unless maintained elsewhere)
5819M: Michal Marek <mmarek@suse.cz> 5833M: Michal Marek <mmarek@suse.com>
5820T: git git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild.git for-next 5834T: git git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild.git for-next
5821T: git git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild.git rc-fixes 5835T: git git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild.git rc-fixes
5822L: linux-kbuild@vger.kernel.org 5836L: linux-kbuild@vger.kernel.org
@@ -5880,7 +5894,7 @@ F: arch/x86/include/asm/svm.h
5880F: arch/x86/kvm/svm.c 5894F: arch/x86/kvm/svm.c
5881 5895
5882KERNEL VIRTUAL MACHINE (KVM) FOR POWERPC 5896KERNEL VIRTUAL MACHINE (KVM) FOR POWERPC
5883M: Alexander Graf <agraf@suse.de> 5897M: Alexander Graf <agraf@suse.com>
5884L: kvm-ppc@vger.kernel.org 5898L: kvm-ppc@vger.kernel.org
5885W: http://kvm.qumranet.com 5899W: http://kvm.qumranet.com
5886T: git git://github.com/agraf/linux-2.6.git 5900T: git git://github.com/agraf/linux-2.6.git
@@ -5898,7 +5912,6 @@ S: Supported
5898F: Documentation/s390/kvm.txt 5912F: Documentation/s390/kvm.txt
5899F: arch/s390/include/asm/kvm* 5913F: arch/s390/include/asm/kvm*
5900F: arch/s390/kvm/ 5914F: arch/s390/kvm/
5901F: drivers/s390/kvm/
5902 5915
5903KERNEL VIRTUAL MACHINE (KVM) FOR ARM 5916KERNEL VIRTUAL MACHINE (KVM) FOR ARM
5904M: Christoffer Dall <christoffer.dall@linaro.org> 5917M: Christoffer Dall <christoffer.dall@linaro.org>
@@ -6037,7 +6050,7 @@ F: drivers/leds/
6037F: include/linux/leds.h 6050F: include/linux/leds.h
6038 6051
6039LEGACY EEPROM DRIVER 6052LEGACY EEPROM DRIVER
6040M: Jean Delvare <jdelvare@suse.de> 6053M: Jean Delvare <jdelvare@suse.com>
6041S: Maintained 6054S: Maintained
6042F: Documentation/misc-devices/eeprom 6055F: Documentation/misc-devices/eeprom
6043F: drivers/misc/eeprom/eeprom.c 6056F: drivers/misc/eeprom/eeprom.c
@@ -6090,7 +6103,7 @@ F: include/linux/ata.h
6090F: include/linux/libata.h 6103F: include/linux/libata.h
6091 6104
6092LIBATA PATA ARASAN COMPACT FLASH CONTROLLER 6105LIBATA PATA ARASAN COMPACT FLASH CONTROLLER
6093M: Viresh Kumar <viresh.linux@gmail.com> 6106M: Viresh Kumar <vireshk@kernel.org>
6094L: linux-ide@vger.kernel.org 6107L: linux-ide@vger.kernel.org
6095T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git 6108T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
6096S: Maintained 6109S: Maintained
@@ -6251,8 +6264,8 @@ F: drivers/platform/x86/hp_accel.c
6251LIVE PATCHING 6264LIVE PATCHING
6252M: Josh Poimboeuf <jpoimboe@redhat.com> 6265M: Josh Poimboeuf <jpoimboe@redhat.com>
6253M: Seth Jennings <sjenning@redhat.com> 6266M: Seth Jennings <sjenning@redhat.com>
6254M: Jiri Kosina <jkosina@suse.cz> 6267M: Jiri Kosina <jkosina@suse.com>
6255M: Vojtech Pavlik <vojtech@suse.cz> 6268M: Vojtech Pavlik <vojtech@suse.com>
6256S: Maintained 6269S: Maintained
6257F: kernel/livepatch/ 6270F: kernel/livepatch/
6258F: include/linux/livepatch.h 6271F: include/linux/livepatch.h
@@ -6278,21 +6291,21 @@ S: Maintained
6278F: drivers/hwmon/lm73.c 6291F: drivers/hwmon/lm73.c
6279 6292
6280LM78 HARDWARE MONITOR DRIVER 6293LM78 HARDWARE MONITOR DRIVER
6281M: Jean Delvare <jdelvare@suse.de> 6294M: Jean Delvare <jdelvare@suse.com>
6282L: lm-sensors@lm-sensors.org 6295L: lm-sensors@lm-sensors.org
6283S: Maintained 6296S: Maintained
6284F: Documentation/hwmon/lm78 6297F: Documentation/hwmon/lm78
6285F: drivers/hwmon/lm78.c 6298F: drivers/hwmon/lm78.c
6286 6299
6287LM83 HARDWARE MONITOR DRIVER 6300LM83 HARDWARE MONITOR DRIVER
6288M: Jean Delvare <jdelvare@suse.de> 6301M: Jean Delvare <jdelvare@suse.com>
6289L: lm-sensors@lm-sensors.org 6302L: lm-sensors@lm-sensors.org
6290S: Maintained 6303S: Maintained
6291F: Documentation/hwmon/lm83 6304F: Documentation/hwmon/lm83
6292F: drivers/hwmon/lm83.c 6305F: drivers/hwmon/lm83.c
6293 6306
6294LM90 HARDWARE MONITOR DRIVER 6307LM90 HARDWARE MONITOR DRIVER
6295M: Jean Delvare <jdelvare@suse.de> 6308M: Jean Delvare <jdelvare@suse.com>
6296L: lm-sensors@lm-sensors.org 6309L: lm-sensors@lm-sensors.org
6297S: Maintained 6310S: Maintained
6298F: Documentation/hwmon/lm90 6311F: Documentation/hwmon/lm90
@@ -6838,6 +6851,12 @@ T: git git://linuxtv.org/anttip/media_tree.git
6838S: Maintained 6851S: Maintained
6839F: drivers/media/usb/msi2500/ 6852F: drivers/media/usb/msi2500/
6840 6853
6854MSYSTEMS DISKONCHIP G3 MTD DRIVER
6855M: Robert Jarzmik <robert.jarzmik@free.fr>
6856L: linux-mtd@lists.infradead.org
6857S: Maintained
6858F: drivers/mtd/devices/docg3*
6859
6841MT9M032 APTINA SENSOR DRIVER 6860MT9M032 APTINA SENSOR DRIVER
6842M: Laurent Pinchart <laurent.pinchart@ideasonboard.com> 6861M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
6843L: linux-media@vger.kernel.org 6862L: linux-media@vger.kernel.org
@@ -7019,6 +7038,7 @@ F: include/uapi/linux/netfilter/
7019F: net/*/netfilter.c 7038F: net/*/netfilter.c
7020F: net/*/netfilter/ 7039F: net/*/netfilter/
7021F: net/netfilter/ 7040F: net/netfilter/
7041F: net/bridge/br_netfilter*.c
7022 7042
7023NETLABEL 7043NETLABEL
7024M: Paul Moore <paul@paul-moore.com> 7044M: Paul Moore <paul@paul-moore.com>
@@ -7718,7 +7738,7 @@ S: Maintained
7718F: drivers/char/pc8736x_gpio.c 7738F: drivers/char/pc8736x_gpio.c
7719 7739
7720PC87427 HARDWARE MONITORING DRIVER 7740PC87427 HARDWARE MONITORING DRIVER
7721M: Jean Delvare <jdelvare@suse.de> 7741M: Jean Delvare <jdelvare@suse.com>
7722L: lm-sensors@lm-sensors.org 7742L: lm-sensors@lm-sensors.org
7723S: Maintained 7743S: Maintained
7724F: Documentation/hwmon/pc87427 7744F: Documentation/hwmon/pc87427
@@ -7995,7 +8015,7 @@ S: Maintained
7995F: drivers/pinctrl/samsung/ 8015F: drivers/pinctrl/samsung/
7996 8016
7997PIN CONTROLLER - ST SPEAR 8017PIN CONTROLLER - ST SPEAR
7998M: Viresh Kumar <viresh.linux@gmail.com> 8018M: Viresh Kumar <vireshk@kernel.org>
7999L: spear-devel@list.st.com 8019L: spear-devel@list.st.com
8000L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 8020L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
8001W: http://www.st.com/spear 8021W: http://www.st.com/spear
@@ -8003,7 +8023,7 @@ S: Maintained
8003F: drivers/pinctrl/spear/ 8023F: drivers/pinctrl/spear/
8004 8024
8005PKTCDVD DRIVER 8025PKTCDVD DRIVER
8006M: Jiri Kosina <jkosina@suse.cz> 8026M: Jiri Kosina <jkosina@suse.com>
8007S: Maintained 8027S: Maintained
8008F: drivers/block/pktcdvd.c 8028F: drivers/block/pktcdvd.c
8009F: include/linux/pktcdvd.h 8029F: include/linux/pktcdvd.h
@@ -8894,7 +8914,7 @@ S: Maintained
8894F: drivers/tty/serial/ 8914F: drivers/tty/serial/
8895 8915
8896SYNOPSYS DESIGNWARE DMAC DRIVER 8916SYNOPSYS DESIGNWARE DMAC DRIVER
8897M: Viresh Kumar <viresh.linux@gmail.com> 8917M: Viresh Kumar <vireshk@kernel.org>
8898M: Andy Shevchenko <andriy.shevchenko@linux.intel.com> 8918M: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
8899S: Maintained 8919S: Maintained
8900F: include/linux/dma/dw.h 8920F: include/linux/dma/dw.h
@@ -9061,7 +9081,7 @@ S: Maintained
9061F: drivers/mmc/host/sdhci-s3c* 9081F: drivers/mmc/host/sdhci-s3c*
9062 9082
9063SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) ST SPEAR DRIVER 9083SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) ST SPEAR DRIVER
9064M: Viresh Kumar <viresh.linux@gmail.com> 9084M: Viresh Kumar <vireshk@kernel.org>
9065L: spear-devel@list.st.com 9085L: spear-devel@list.st.com
9066L: linux-mmc@vger.kernel.org 9086L: linux-mmc@vger.kernel.org
9067S: Maintained 9087S: Maintained
@@ -9423,7 +9443,7 @@ F: Documentation/hwmon/sch5627
9423F: drivers/hwmon/sch5627.c 9443F: drivers/hwmon/sch5627.c
9424 9444
9425SMSC47B397 HARDWARE MONITOR DRIVER 9445SMSC47B397 HARDWARE MONITOR DRIVER
9426M: Jean Delvare <jdelvare@suse.de> 9446M: Jean Delvare <jdelvare@suse.com>
9427L: lm-sensors@lm-sensors.org 9447L: lm-sensors@lm-sensors.org
9428S: Maintained 9448S: Maintained
9429F: Documentation/hwmon/smsc47b397 9449F: Documentation/hwmon/smsc47b397
@@ -9472,7 +9492,7 @@ S: Supported
9472F: drivers/media/pci/solo6x10/ 9492F: drivers/media/pci/solo6x10/
9473 9493
9474SOFTWARE RAID (Multiple Disks) SUPPORT 9494SOFTWARE RAID (Multiple Disks) SUPPORT
9475M: Neil Brown <neilb@suse.de> 9495M: Neil Brown <neilb@suse.com>
9476L: linux-raid@vger.kernel.org 9496L: linux-raid@vger.kernel.org
9477S: Supported 9497S: Supported
9478F: drivers/md/ 9498F: drivers/md/
@@ -9515,7 +9535,7 @@ F: drivers/memstick/core/ms_block.*
9515 9535
9516SOUND 9536SOUND
9517M: Jaroslav Kysela <perex@perex.cz> 9537M: Jaroslav Kysela <perex@perex.cz>
9518M: Takashi Iwai <tiwai@suse.de> 9538M: Takashi Iwai <tiwai@suse.com>
9519L: alsa-devel@alsa-project.org (moderated for non-subscribers) 9539L: alsa-devel@alsa-project.org (moderated for non-subscribers)
9520W: http://www.alsa-project.org/ 9540W: http://www.alsa-project.org/
9521T: git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git 9541T: git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git
@@ -9599,7 +9619,7 @@ S: Maintained
9599F: include/linux/compiler.h 9619F: include/linux/compiler.h
9600 9620
9601SPEAR PLATFORM SUPPORT 9621SPEAR PLATFORM SUPPORT
9602M: Viresh Kumar <viresh.linux@gmail.com> 9622M: Viresh Kumar <vireshk@kernel.org>
9603M: Shiraz Hashim <shiraz.linux.kernel@gmail.com> 9623M: Shiraz Hashim <shiraz.linux.kernel@gmail.com>
9604L: spear-devel@list.st.com 9624L: spear-devel@list.st.com
9605L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 9625L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -9608,7 +9628,7 @@ S: Maintained
9608F: arch/arm/mach-spear/ 9628F: arch/arm/mach-spear/
9609 9629
9610SPEAR CLOCK FRAMEWORK SUPPORT 9630SPEAR CLOCK FRAMEWORK SUPPORT
9611M: Viresh Kumar <viresh.linux@gmail.com> 9631M: Viresh Kumar <vireshk@kernel.org>
9612L: spear-devel@list.st.com 9632L: spear-devel@list.st.com
9613L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 9633L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
9614W: http://www.st.com/spear 9634W: http://www.st.com/spear
@@ -10398,7 +10418,7 @@ K: ^Subject:.*(?i)trivial
10398 10418
10399TTY LAYER 10419TTY LAYER
10400M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> 10420M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
10401M: Jiri Slaby <jslaby@suse.cz> 10421M: Jiri Slaby <jslaby@suse.com>
10402S: Supported 10422S: Supported
10403T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/tty.git 10423T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/tty.git
10404F: Documentation/serial/ 10424F: Documentation/serial/
@@ -10472,7 +10492,7 @@ F: arch/m68k/*/*_no.*
10472F: arch/m68k/include/asm/*_no.* 10492F: arch/m68k/include/asm/*_no.*
10473 10493
10474UDF FILESYSTEM 10494UDF FILESYSTEM
10475M: Jan Kara <jack@suse.cz> 10495M: Jan Kara <jack@suse.com>
10476S: Maintained 10496S: Maintained
10477F: Documentation/filesystems/udf.txt 10497F: Documentation/filesystems/udf.txt
10478F: fs/udf/ 10498F: fs/udf/
@@ -10615,7 +10635,7 @@ F: drivers/usb/gadget/
10615F: include/linux/usb/gadget* 10635F: include/linux/usb/gadget*
10616 10636
10617USB HID/HIDBP DRIVERS (USB KEYBOARDS, MICE, REMOTE CONTROLS, ...) 10637USB HID/HIDBP DRIVERS (USB KEYBOARDS, MICE, REMOTE CONTROLS, ...)
10618M: Jiri Kosina <jkosina@suse.cz> 10638M: Jiri Kosina <jkosina@suse.com>
10619L: linux-usb@vger.kernel.org 10639L: linux-usb@vger.kernel.org
10620T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/hid.git 10640T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/hid.git
10621S: Maintained 10641S: Maintained
@@ -10740,7 +10760,7 @@ S: Maintained
10740F: drivers/usb/host/uhci* 10760F: drivers/usb/host/uhci*
10741 10761
10742USB "USBNET" DRIVER FRAMEWORK 10762USB "USBNET" DRIVER FRAMEWORK
10743M: Oliver Neukum <oneukum@suse.de> 10763M: Oliver Neukum <oneukum@suse.com>
10744L: netdev@vger.kernel.org 10764L: netdev@vger.kernel.org
10745W: http://www.linux-usb.org/usbnet 10765W: http://www.linux-usb.org/usbnet
10746S: Maintained 10766S: Maintained
@@ -10894,6 +10914,15 @@ F: drivers/block/virtio_blk.c
10894F: include/linux/virtio_*.h 10914F: include/linux/virtio_*.h
10895F: include/uapi/linux/virtio_*.h 10915F: include/uapi/linux/virtio_*.h
10896 10916
10917VIRTIO DRIVERS FOR S390
10918M: Christian Borntraeger <borntraeger@de.ibm.com>
10919M: Cornelia Huck <cornelia.huck@de.ibm.com>
10920L: linux-s390@vger.kernel.org
10921L: virtualization@lists.linux-foundation.org
10922L: kvm@vger.kernel.org
10923S: Supported
10924F: drivers/s390/virtio/
10925
10897VIRTIO GPU DRIVER 10926VIRTIO GPU DRIVER
10898M: David Airlie <airlied@linux.ie> 10927M: David Airlie <airlied@linux.ie>
10899M: Gerd Hoffmann <kraxel@redhat.com> 10928M: Gerd Hoffmann <kraxel@redhat.com>
@@ -11067,7 +11096,7 @@ F: Documentation/hwmon/w83793
11067F: drivers/hwmon/w83793.c 11096F: drivers/hwmon/w83793.c
11068 11097
11069W83795 HARDWARE MONITORING DRIVER 11098W83795 HARDWARE MONITORING DRIVER
11070M: Jean Delvare <jdelvare@suse.de> 11099M: Jean Delvare <jdelvare@suse.com>
11071L: lm-sensors@lm-sensors.org 11100L: lm-sensors@lm-sensors.org
11072S: Maintained 11101S: Maintained
11073F: drivers/hwmon/w83795.c 11102F: drivers/hwmon/w83795.c
diff --git a/Makefile b/Makefile
index 257ef5892ab7..246053f04fb5 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 4 1VERSION = 4
2PATCHLEVEL = 2 2PATCHLEVEL = 2
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc2 4EXTRAVERSION = -rc8
5NAME = Hurr durr I'ma sheep 5NAME = Hurr durr I'ma sheep
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
@@ -597,6 +597,11 @@ endif # $(dot-config)
597# Defaults to vmlinux, but the arch makefile usually adds further targets 597# Defaults to vmlinux, but the arch makefile usually adds further targets
598all: vmlinux 598all: vmlinux
599 599
600# The arch Makefile can set ARCH_{CPP,A,C}FLAGS to override the default
601# values of the respective KBUILD_* variables
602ARCH_CPPFLAGS :=
603ARCH_AFLAGS :=
604ARCH_CFLAGS :=
600include arch/$(SRCARCH)/Makefile 605include arch/$(SRCARCH)/Makefile
601 606
602KBUILD_CFLAGS += $(call cc-option,-fno-delete-null-pointer-checks,) 607KBUILD_CFLAGS += $(call cc-option,-fno-delete-null-pointer-checks,)
@@ -780,10 +785,11 @@ endif
780include scripts/Makefile.kasan 785include scripts/Makefile.kasan
781include scripts/Makefile.extrawarn 786include scripts/Makefile.extrawarn
782 787
783# Add user supplied CPPFLAGS, AFLAGS and CFLAGS as the last assignments 788# Add any arch overrides and user supplied CPPFLAGS, AFLAGS and CFLAGS as the
784KBUILD_CPPFLAGS += $(KCPPFLAGS) 789# last assignments
785KBUILD_AFLAGS += $(KAFLAGS) 790KBUILD_CPPFLAGS += $(ARCH_CPPFLAGS) $(KCPPFLAGS)
786KBUILD_CFLAGS += $(KCFLAGS) 791KBUILD_AFLAGS += $(ARCH_AFLAGS) $(KAFLAGS)
792KBUILD_CFLAGS += $(ARCH_CFLAGS) $(KCFLAGS)
787 793
788# Use --build-id when available. 794# Use --build-id when available.
789LDFLAGS_BUILD_ID = $(patsubst -Wl$(comma)%,%,\ 795LDFLAGS_BUILD_ID = $(patsubst -Wl$(comma)%,%,\
@@ -847,10 +853,10 @@ export mod_strip_cmd
847mod_compress_cmd = true 853mod_compress_cmd = true
848ifdef CONFIG_MODULE_COMPRESS 854ifdef CONFIG_MODULE_COMPRESS
849 ifdef CONFIG_MODULE_COMPRESS_GZIP 855 ifdef CONFIG_MODULE_COMPRESS_GZIP
850 mod_compress_cmd = gzip -n 856 mod_compress_cmd = gzip -n -f
851 endif # CONFIG_MODULE_COMPRESS_GZIP 857 endif # CONFIG_MODULE_COMPRESS_GZIP
852 ifdef CONFIG_MODULE_COMPRESS_XZ 858 ifdef CONFIG_MODULE_COMPRESS_XZ
853 mod_compress_cmd = xz 859 mod_compress_cmd = xz -f
854 endif # CONFIG_MODULE_COMPRESS_XZ 860 endif # CONFIG_MODULE_COMPRESS_XZ
855endif # CONFIG_MODULE_COMPRESS 861endif # CONFIG_MODULE_COMPRESS
856export mod_compress_cmd 862export mod_compress_cmd
diff --git a/arch/Kconfig b/arch/Kconfig
index bec6666a3cc4..8a8ea7110de8 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -221,6 +221,10 @@ config ARCH_TASK_STRUCT_ALLOCATOR
221config ARCH_THREAD_INFO_ALLOCATOR 221config ARCH_THREAD_INFO_ALLOCATOR
222 bool 222 bool
223 223
224# Select if arch wants to size task_struct dynamically via arch_task_struct_size:
225config ARCH_WANTS_DYNAMIC_TASK_STRUCT
226 bool
227
224config HAVE_REGS_AND_STACK_ACCESS_API 228config HAVE_REGS_AND_STACK_ACCESS_API
225 bool 229 bool
226 help 230 help
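
The new ARCH_WANTS_DYNAMIC_TASK_STRUCT option lets an architecture decide the task_struct allocation size at boot, via the arch_task_struct_size variable named in the Kconfig comment, so variable-sized arch state can be appended to the structure. Below is a minimal stand-alone sketch of that idea, not kernel code; struct task_demo, task_demo_size and the 128-byte state area are invented for illustration.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-alone analogue of a boot-time sized per-task structure.
 * task_demo, task_demo_size and the 128-byte arch state are invented
 * for illustration; only the trailing runtime-sized state idea mirrors
 * what arch_task_struct_size enables in the kernel.
 */
struct task_demo {
	int pid;
	char comm[16];
	unsigned char arch_state[];	/* runtime-sized tail */
};

static size_t task_demo_size;		/* decided once, at "boot" */

int main(void)
{
	size_t arch_state_size = 128;	/* e.g. probed CPU save-area size */
	struct task_demo *t;

	task_demo_size = sizeof(*t) + arch_state_size;

	t = calloc(1, task_demo_size);
	if (!t)
		return 1;

	t->pid = 1;
	strcpy(t->comm, "init");
	printf("%s: allocated %zu bytes per task\n", t->comm, task_demo_size);

	free(t);
	return 0;
}
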
diff --git a/arch/alpha/include/asm/Kbuild b/arch/alpha/include/asm/Kbuild
index cde23cd03609..ffd9cf5ec8c4 100644
--- a/arch/alpha/include/asm/Kbuild
+++ b/arch/alpha/include/asm/Kbuild
@@ -5,6 +5,7 @@ generic-y += cputime.h
5generic-y += exec.h 5generic-y += exec.h
6generic-y += irq_work.h 6generic-y += irq_work.h
7generic-y += mcs_spinlock.h 7generic-y += mcs_spinlock.h
8generic-y += mm-arch-hooks.h
8generic-y += preempt.h 9generic-y += preempt.h
9generic-y += sections.h 10generic-y += sections.h
10generic-y += trace_clock.h 11generic-y += trace_clock.h
diff --git a/arch/alpha/include/asm/mm-arch-hooks.h b/arch/alpha/include/asm/mm-arch-hooks.h
deleted file mode 100644
index b07fd862fec3..000000000000
--- a/arch/alpha/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * Architecture specific mm hooks
3 *
4 * Copyright (C) 2015, IBM Corporation
5 * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _ASM_ALPHA_MM_ARCH_HOOKS_H
13#define _ASM_ALPHA_MM_ARCH_HOOKS_H
14
15#endif /* _ASM_ALPHA_MM_ARCH_HOOKS_H */
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index e7cee0a5c56d..bd4670d1b89b 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -115,6 +115,7 @@ if ISA_ARCOMPACT
115 115
116config ARC_CPU_750D 116config ARC_CPU_750D
117 bool "ARC750D" 117 bool "ARC750D"
118 select ARC_CANT_LLSC
118 help 119 help
119 Support for ARC750 core 120 Support for ARC750 core
120 121
@@ -312,11 +313,11 @@ config ARC_PAGE_SIZE_8K
312 313
313config ARC_PAGE_SIZE_16K 314config ARC_PAGE_SIZE_16K
314 bool "16KB" 315 bool "16KB"
315 depends on ARC_MMU_V3 316 depends on ARC_MMU_V3 || ARC_MMU_V4
316 317
317config ARC_PAGE_SIZE_4K 318config ARC_PAGE_SIZE_4K
318 bool "4KB" 319 bool "4KB"
319 depends on ARC_MMU_V3 320 depends on ARC_MMU_V3 || ARC_MMU_V4
320 321
321endchoice 322endchoice
322 323
@@ -362,7 +363,12 @@ config ARC_CANT_LLSC
362config ARC_HAS_LLSC 363config ARC_HAS_LLSC
363 bool "Insn: LLOCK/SCOND (efficient atomic ops)" 364 bool "Insn: LLOCK/SCOND (efficient atomic ops)"
364 default y 365 default y
365 depends on !ARC_CPU_750D && !ARC_CANT_LLSC 366 depends on !ARC_CANT_LLSC
367
368config ARC_STAR_9000923308
369 bool "Workaround for llock/scond livelock"
370 default y
371 depends on ISA_ARCV2 && SMP && ARC_HAS_LLSC
366 372
367config ARC_HAS_SWAPE 373config ARC_HAS_SWAPE
368 bool "Insn: SWAPE (endian-swap)" 374 bool "Insn: SWAPE (endian-swap)"
@@ -378,6 +384,10 @@ config ARC_HAS_LL64
378 dest operands with 2 possible source operands. 384 dest operands with 2 possible source operands.
379 default y 385 default y
380 386
387config ARC_HAS_DIV_REM
388 bool "Insn: div, divu, rem, remu"
389 default y
390
381config ARC_HAS_RTC 391config ARC_HAS_RTC
382 bool "Local 64-bit r/o cycle counter" 392 bool "Local 64-bit r/o cycle counter"
383 default n 393 default n
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index 6107062c0111..8a27a48304a4 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -36,8 +36,16 @@ cflags-$(atleast_gcc44) += -fsection-anchors
36cflags-$(CONFIG_ARC_HAS_LLSC) += -mlock 36cflags-$(CONFIG_ARC_HAS_LLSC) += -mlock
37cflags-$(CONFIG_ARC_HAS_SWAPE) += -mswape 37cflags-$(CONFIG_ARC_HAS_SWAPE) += -mswape
38 38
39ifdef CONFIG_ISA_ARCV2
40
39ifndef CONFIG_ARC_HAS_LL64 41ifndef CONFIG_ARC_HAS_LL64
40cflags-$(CONFIG_ISA_ARCV2) += -mno-ll64 42cflags-y += -mno-ll64
43endif
44
45ifndef CONFIG_ARC_HAS_DIV_REM
46cflags-y += -mno-div-rem
47endif
48
41endif 49endif
42 50
43cflags-$(CONFIG_ARC_DW2_UNWIND) += -fasynchronous-unwind-tables 51cflags-$(CONFIG_ARC_DW2_UNWIND) += -fasynchronous-unwind-tables
@@ -49,7 +57,8 @@ endif
49 57
50ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE 58ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE
51# Generic build system uses -O2, we want -O3 59# Generic build system uses -O2, we want -O3
52cflags-y += -O3 60# Note: No need to add to cflags-y as that happens anyways
61ARCH_CFLAGS += -O3
53endif 62endif
54 63
55# small data is default for elf32 tool-chain. If not usable, disable it 64# small data is default for elf32 tool-chain. If not usable, disable it
diff --git a/arch/arc/boot/dts/axc003.dtsi b/arch/arc/boot/dts/axc003.dtsi
index 15c8d6226c9d..1cd5e82f5dc2 100644
--- a/arch/arc/boot/dts/axc003.dtsi
+++ b/arch/arc/boot/dts/axc003.dtsi
@@ -12,7 +12,7 @@
12 12
13/ { 13/ {
14 compatible = "snps,arc"; 14 compatible = "snps,arc";
15 clock-frequency = <75000000>; 15 clock-frequency = <90000000>;
16 #address-cells = <1>; 16 #address-cells = <1>;
17 #size-cells = <1>; 17 #size-cells = <1>;
18 18
diff --git a/arch/arc/boot/dts/axc003_idu.dtsi b/arch/arc/boot/dts/axc003_idu.dtsi
index 199d42820eca..2f0b33257db2 100644
--- a/arch/arc/boot/dts/axc003_idu.dtsi
+++ b/arch/arc/boot/dts/axc003_idu.dtsi
@@ -12,7 +12,7 @@
12 12
13/ { 13/ {
14 compatible = "snps,arc"; 14 compatible = "snps,arc";
15 clock-frequency = <75000000>; 15 clock-frequency = <90000000>;
16 #address-cells = <1>; 16 #address-cells = <1>;
17 #size-cells = <1>; 17 #size-cells = <1>;
18 18
diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild
index 1a80cc91a03b..7611b10a2d23 100644
--- a/arch/arc/include/asm/Kbuild
+++ b/arch/arc/include/asm/Kbuild
@@ -22,6 +22,7 @@ generic-y += kvm_para.h
22generic-y += local.h 22generic-y += local.h
23generic-y += local64.h 23generic-y += local64.h
24generic-y += mcs_spinlock.h 24generic-y += mcs_spinlock.h
25generic-y += mm-arch-hooks.h
25generic-y += mman.h 26generic-y += mman.h
26generic-y += msgbuf.h 27generic-y += msgbuf.h
27generic-y += param.h 28generic-y += param.h
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
index 070f58827a5c..c8f57b8449dc 100644
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
@@ -89,11 +89,10 @@
89#define ECR_C_BIT_DTLB_LD_MISS 8 89#define ECR_C_BIT_DTLB_LD_MISS 8
90#define ECR_C_BIT_DTLB_ST_MISS 9 90#define ECR_C_BIT_DTLB_ST_MISS 9
91 91
92
93/* Auxiliary registers */ 92/* Auxiliary registers */
94#define AUX_IDENTITY 4 93#define AUX_IDENTITY 4
95#define AUX_INTR_VEC_BASE 0x25 94#define AUX_INTR_VEC_BASE 0x25
96 95#define AUX_NON_VOL 0x5e
97 96
98/* 97/*
99 * Floating Pt Registers 98 * Floating Pt Registers
@@ -240,9 +239,9 @@ struct bcr_extn_xymem {
240 239
241struct bcr_perip { 240struct bcr_perip {
242#ifdef CONFIG_CPU_BIG_ENDIAN 241#ifdef CONFIG_CPU_BIG_ENDIAN
243 unsigned int start:8, pad2:8, sz:8, pad:8; 242 unsigned int start:8, pad2:8, sz:8, ver:8;
244#else 243#else
245 unsigned int pad:8, sz:8, pad2:8, start:8; 244 unsigned int ver:8, sz:8, pad2:8, start:8;
246#endif 245#endif
247}; 246};
248 247
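
The bcr_perip change above renames the low byte of the peripheral build-config register from pad to ver, keeping the usual kernel pattern of declaring bitfields in opposite order for big- and little-endian builds so each name maps to the same bits of the raw word. Here is a small stand-alone sketch of that pattern, assuming GCC-style LSB-first bitfield allocation on little-endian targets; the struct name and sample register value are invented.

#include <stdio.h>
#include <stdint.h>

/* Endian-dependent bitfield layout, as in struct bcr_perip: the field
 * order flips so "ver" always names the least significant byte of the
 * raw 32-bit register.  Bitfield allocation order inside the storage
 * unit is compiler-dependent; this assumes GCC's usual LSB-first
 * placement on little-endian targets.
 */
struct bcr_demo {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	uint32_t start:8, pad2:8, sz:8, ver:8;
#else
	uint32_t ver:8, sz:8, pad2:8, start:8;
#endif
};

int main(void)
{
	union {
		struct bcr_demo f;
		uint32_t raw;
	} r = { .raw = 0x8f000203 };

	/* expect ver=3 sz=2 start=0x8f */
	printf("ver=%u sz=%u start=%#x\n",
	       (unsigned)r.f.ver, (unsigned)r.f.sz, (unsigned)r.f.start);
	return 0;
}
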
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index 03484cb4d16d..87d18ae53115 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -23,33 +23,60 @@
23 23
24#define atomic_set(v, i) (((v)->counter) = (i)) 24#define atomic_set(v, i) (((v)->counter) = (i))
25 25
26#ifdef CONFIG_ISA_ARCV2 26#ifdef CONFIG_ARC_STAR_9000923308
27#define PREFETCHW " prefetchw [%1] \n" 27
28#else 28#define SCOND_FAIL_RETRY_VAR_DEF \
29#define PREFETCHW 29 unsigned int delay = 1, tmp; \
30
31#define SCOND_FAIL_RETRY_ASM \
32 " bz 4f \n" \
33 " ; --- scond fail delay --- \n" \
34 " mov %[tmp], %[delay] \n" /* tmp = delay */ \
35 "2: brne.d %[tmp], 0, 2b \n" /* while (tmp != 0) */ \
36 " sub %[tmp], %[tmp], 1 \n" /* tmp-- */ \
37 " rol %[delay], %[delay] \n" /* delay *= 2 */ \
38 " b 1b \n" /* start over */ \
39 "4: ; --- success --- \n" \
40
41#define SCOND_FAIL_RETRY_VARS \
42 ,[delay] "+&r" (delay),[tmp] "=&r" (tmp) \
43
44#else /* !CONFIG_ARC_STAR_9000923308 */
45
46#define SCOND_FAIL_RETRY_VAR_DEF
47
48#define SCOND_FAIL_RETRY_ASM \
49 " bnz 1b \n" \
50
51#define SCOND_FAIL_RETRY_VARS
52
30#endif 53#endif
31 54
32#define ATOMIC_OP(op, c_op, asm_op) \ 55#define ATOMIC_OP(op, c_op, asm_op) \
33static inline void atomic_##op(int i, atomic_t *v) \ 56static inline void atomic_##op(int i, atomic_t *v) \
34{ \ 57{ \
35 unsigned int temp; \ 58 unsigned int val; \
59 SCOND_FAIL_RETRY_VAR_DEF \
36 \ 60 \
37 __asm__ __volatile__( \ 61 __asm__ __volatile__( \
38 "1: \n" \ 62 "1: llock %[val], [%[ctr]] \n" \
39 PREFETCHW \ 63 " " #asm_op " %[val], %[val], %[i] \n" \
40 " llock %0, [%1] \n" \ 64 " scond %[val], [%[ctr]] \n" \
41 " " #asm_op " %0, %0, %2 \n" \ 65 " \n" \
42 " scond %0, [%1] \n" \ 66 SCOND_FAIL_RETRY_ASM \
43 " bnz 1b \n" \ 67 \
44 : "=&r"(temp) /* Early clobber, to prevent reg reuse */ \ 68 : [val] "=&r" (val) /* Early clobber to prevent reg reuse */ \
45 : "r"(&v->counter), "ir"(i) \ 69 SCOND_FAIL_RETRY_VARS \
70 : [ctr] "r" (&v->counter), /* Not "m": llock only supports reg direct addr mode */ \
71 [i] "ir" (i) \
46 : "cc"); \ 72 : "cc"); \
47} \ 73} \
48 74
49#define ATOMIC_OP_RETURN(op, c_op, asm_op) \ 75#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
50static inline int atomic_##op##_return(int i, atomic_t *v) \ 76static inline int atomic_##op##_return(int i, atomic_t *v) \
51{ \ 77{ \
52 unsigned int temp; \ 78 unsigned int val; \
79 SCOND_FAIL_RETRY_VAR_DEF \
53 \ 80 \
54 /* \ 81 /* \
55 * Explicit full memory barrier needed before/after as \ 82 * Explicit full memory barrier needed before/after as \
@@ -58,19 +85,21 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
58 smp_mb(); \ 85 smp_mb(); \
59 \ 86 \
60 __asm__ __volatile__( \ 87 __asm__ __volatile__( \
61 "1: \n" \ 88 "1: llock %[val], [%[ctr]] \n" \
62 PREFETCHW \ 89 " " #asm_op " %[val], %[val], %[i] \n" \
63 " llock %0, [%1] \n" \ 90 " scond %[val], [%[ctr]] \n" \
64 " " #asm_op " %0, %0, %2 \n" \ 91 " \n" \
65 " scond %0, [%1] \n" \ 92 SCOND_FAIL_RETRY_ASM \
66 " bnz 1b \n" \ 93 \
67 : "=&r"(temp) \ 94 : [val] "=&r" (val) \
68 : "r"(&v->counter), "ir"(i) \ 95 SCOND_FAIL_RETRY_VARS \
96 : [ctr] "r" (&v->counter), \
97 [i] "ir" (i) \
69 : "cc"); \ 98 : "cc"); \
70 \ 99 \
71 smp_mb(); \ 100 smp_mb(); \
72 \ 101 \
73 return temp; \ 102 return val; \
74} 103}
75 104
76#else /* !CONFIG_ARC_HAS_LLSC */ 105#else /* !CONFIG_ARC_HAS_LLSC */
@@ -150,6 +179,9 @@ ATOMIC_OP(and, &=, and)
150#undef ATOMIC_OPS 179#undef ATOMIC_OPS
151#undef ATOMIC_OP_RETURN 180#undef ATOMIC_OP_RETURN
152#undef ATOMIC_OP 181#undef ATOMIC_OP
182#undef SCOND_FAIL_RETRY_VAR_DEF
183#undef SCOND_FAIL_RETRY_ASM
184#undef SCOND_FAIL_RETRY_VARS
153 185
154/** 186/**
155 * __atomic_add_unless - add unless the number is a given value 187 * __atomic_add_unless - add unless the number is a given value
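
The atomic.h rework above keeps the same LLOCK/SCOND shape: load the counter exclusively, apply the operation, attempt the conditional store, and retry if another CPU intervened (plus the bounded-backoff retry under CONFIG_ARC_STAR_9000923308). A portable user-space sketch of that read-modify-write retry loop using the GCC __atomic builtins follows; atomic_add_demo is an illustrative name, not the kernel API.

#include <stdio.h>

/* Read-modify-write retry loop, the shape ATOMIC_OP() generates:
 * load, apply the operation, attempt the exclusive store, retry if
 * another CPU got in between (a failed SCOND branches back to LLOCK).
 */
static void atomic_add_demo(int i, int *counter)
{
	int old, new;

	do {
		old = __atomic_load_n(counter, __ATOMIC_RELAXED);
		new = old + i;
	} while (!__atomic_compare_exchange_n(counter, &old, new,
					      1 /* weak */,
					      __ATOMIC_RELAXED,
					      __ATOMIC_RELAXED));
}

int main(void)
{
	int v = 40;

	atomic_add_demo(2, &v);
	printf("%d\n", v);	/* 42 */
	return 0;
}
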
diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h
index 99fe118d3730..57c1f33844d4 100644
--- a/arch/arc/include/asm/bitops.h
+++ b/arch/arc/include/asm/bitops.h
@@ -50,8 +50,7 @@ static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
50 * done for const @nr, but no code is generated due to gcc \ 50 * done for const @nr, but no code is generated due to gcc \
51 * const prop. \ 51 * const prop. \
52 */ \ 52 */ \
53 if (__builtin_constant_p(nr)) \ 53 nr &= 0x1f; \
54 nr &= 0x1f; \
55 \ 54 \
56 __asm__ __volatile__( \ 55 __asm__ __volatile__( \
57 "1: llock %0, [%1] \n" \ 56 "1: llock %0, [%1] \n" \
@@ -82,8 +81,7 @@ static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *
82 \ 81 \
83 m += nr >> 5; \ 82 m += nr >> 5; \
84 \ 83 \
85 if (__builtin_constant_p(nr)) \ 84 nr &= 0x1f; \
86 nr &= 0x1f; \
87 \ 85 \
88 /* \ 86 /* \
89 * Explicit full memory barrier needed before/after as \ 87 * Explicit full memory barrier needed before/after as \
@@ -129,16 +127,13 @@ static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
129 unsigned long temp, flags; \ 127 unsigned long temp, flags; \
130 m += nr >> 5; \ 128 m += nr >> 5; \
131 \ 129 \
132 if (__builtin_constant_p(nr)) \
133 nr &= 0x1f; \
134 \
135 /* \ 130 /* \
136 * spin lock/unlock provide the needed smp_mb() before/after \ 131 * spin lock/unlock provide the needed smp_mb() before/after \
137 */ \ 132 */ \
138 bitops_lock(flags); \ 133 bitops_lock(flags); \
139 \ 134 \
140 temp = *m; \ 135 temp = *m; \
141 *m = temp c_op (1UL << nr); \ 136 *m = temp c_op (1UL << (nr & 0x1f)); \
142 \ 137 \
143 bitops_unlock(flags); \ 138 bitops_unlock(flags); \
144} 139}
@@ -149,17 +144,14 @@ static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *
149 unsigned long old, flags; \ 144 unsigned long old, flags; \
150 m += nr >> 5; \ 145 m += nr >> 5; \
151 \ 146 \
152 if (__builtin_constant_p(nr)) \
153 nr &= 0x1f; \
154 \
155 bitops_lock(flags); \ 147 bitops_lock(flags); \
156 \ 148 \
157 old = *m; \ 149 old = *m; \
158 *m = old c_op (1 << nr); \ 150 *m = old c_op (1UL << (nr & 0x1f)); \
159 \ 151 \
160 bitops_unlock(flags); \ 152 bitops_unlock(flags); \
161 \ 153 \
162 return (old & (1 << nr)) != 0; \ 154 return (old & (1UL << (nr & 0x1f))) != 0; \
163} 155}
164 156
165#endif /* CONFIG_ARC_HAS_LLSC */ 157#endif /* CONFIG_ARC_HAS_LLSC */
@@ -174,11 +166,8 @@ static inline void __##op##_bit(unsigned long nr, volatile unsigned long *m) \
174 unsigned long temp; \ 166 unsigned long temp; \
175 m += nr >> 5; \ 167 m += nr >> 5; \
176 \ 168 \
177 if (__builtin_constant_p(nr)) \
178 nr &= 0x1f; \
179 \
180 temp = *m; \ 169 temp = *m; \
181 *m = temp c_op (1UL << nr); \ 170 *m = temp c_op (1UL << (nr & 0x1f)); \
182} 171}
183 172
184#define __TEST_N_BIT_OP(op, c_op, asm_op) \ 173#define __TEST_N_BIT_OP(op, c_op, asm_op) \
@@ -187,13 +176,10 @@ static inline int __test_and_##op##_bit(unsigned long nr, volatile unsigned long
187 unsigned long old; \ 176 unsigned long old; \
188 m += nr >> 5; \ 177 m += nr >> 5; \
189 \ 178 \
190 if (__builtin_constant_p(nr)) \
191 nr &= 0x1f; \
192 \
193 old = *m; \ 179 old = *m; \
194 *m = old c_op (1 << nr); \ 180 *m = old c_op (1UL << (nr & 0x1f)); \
195 \ 181 \
196 return (old & (1 << nr)) != 0; \ 182 return (old & (1UL << (nr & 0x1f))) != 0; \
197} 183}
198 184
199#define BIT_OPS(op, c_op, asm_op) \ 185#define BIT_OPS(op, c_op, asm_op) \
@@ -224,10 +210,7 @@ test_bit(unsigned int nr, const volatile unsigned long *addr)
224 210
225 addr += nr >> 5; 211 addr += nr >> 5;
226 212
227 if (__builtin_constant_p(nr)) 213 mask = 1UL << (nr & 0x1f);
228 nr &= 0x1f;
229
230 mask = 1 << nr;
231 214
232 return ((mask & *addr) != 0); 215 return ((mask & *addr) != 0);
233} 216}
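
The bitops change drops the __builtin_constant_p() special case and always reduces the bit number modulo the 32-bit word (nr & 0x1f) after advancing the word pointer by nr >> 5. A stand-alone illustration of that split, assuming 32-bit words as on ARC (a 64-bit-long implementation would use >> 6 and & 0x3f); set_bit_demo is an illustrative name.

#include <stdio.h>

/* Word/bit split used by the ARC bitops: advance to the right 32-bit
 * word with nr >> 5, then mask the bit index with nr & 0x1f.
 */
static void set_bit_demo(unsigned long nr, unsigned long *bitmap)
{
	bitmap += nr >> 5;			/* select the word */
	*bitmap |= 1UL << (nr & 0x1f);		/* set the bit within it */
}

int main(void)
{
	unsigned long map[4] = { 0 };

	set_bit_demo(37, map);			/* word 1, bit 5 */
	printf("map[1] = %#lx\n", map[1]);	/* 0x20 */
	return 0;
}
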
diff --git a/arch/arc/include/asm/futex.h b/arch/arc/include/asm/futex.h
index 05b5aaf5b0f9..70cfe16b742d 100644
--- a/arch/arc/include/asm/futex.h
+++ b/arch/arc/include/asm/futex.h
@@ -16,12 +16,40 @@
16#include <linux/uaccess.h> 16#include <linux/uaccess.h>
17#include <asm/errno.h> 17#include <asm/errno.h>
18 18
19#ifdef CONFIG_ARC_HAS_LLSC
20
21#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)\
22 \
23 __asm__ __volatile__( \
24 "1: llock %1, [%2] \n" \
25 insn "\n" \
26 "2: scond %0, [%2] \n" \
27 " bnz 1b \n" \
28 " mov %0, 0 \n" \
29 "3: \n" \
30 " .section .fixup,\"ax\" \n" \
31 " .align 4 \n" \
32 "4: mov %0, %4 \n" \
33 " b 3b \n" \
34 " .previous \n" \
35 " .section __ex_table,\"a\" \n" \
36 " .align 4 \n" \
37 " .word 1b, 4b \n" \
38 " .word 2b, 4b \n" \
39 " .previous \n" \
40 \
41 : "=&r" (ret), "=&r" (oldval) \
42 : "r" (uaddr), "r" (oparg), "ir" (-EFAULT) \
43 : "cc", "memory")
44
45#else /* !CONFIG_ARC_HAS_LLSC */
46
19#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)\ 47#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)\
20 \ 48 \
21 __asm__ __volatile__( \ 49 __asm__ __volatile__( \
22 "1: ld %1, [%2] \n" \ 50 "1: ld %1, [%2] \n" \
23 insn "\n" \ 51 insn "\n" \
24 "2: st %0, [%2] \n" \ 52 "2: st %0, [%2] \n" \
25 " mov %0, 0 \n" \ 53 " mov %0, 0 \n" \
26 "3: \n" \ 54 "3: \n" \
27 " .section .fixup,\"ax\" \n" \ 55 " .section .fixup,\"ax\" \n" \
@@ -39,6 +67,8 @@
39 : "r" (uaddr), "r" (oparg), "ir" (-EFAULT) \ 67 : "r" (uaddr), "r" (oparg), "ir" (-EFAULT) \
40 : "cc", "memory") 68 : "cc", "memory")
41 69
70#endif
71
42static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) 72static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
43{ 73{
44 int op = (encoded_op >> 28) & 7; 74 int op = (encoded_op >> 28) & 7;
@@ -123,11 +153,17 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval,
123 153
124 pagefault_disable(); 154 pagefault_disable();
125 155
126 /* TBD : can use llock/scond */
127 __asm__ __volatile__( 156 __asm__ __volatile__(
128 "1: ld %0, [%3] \n" 157#ifdef CONFIG_ARC_HAS_LLSC
129 " brne %0, %1, 3f \n" 158 "1: llock %0, [%3] \n"
130 "2: st %2, [%3] \n" 159 " brne %0, %1, 3f \n"
160 "2: scond %2, [%3] \n"
161 " bnz 1b \n"
162#else
163 "1: ld %0, [%3] \n"
164 " brne %0, %1, 3f \n"
165 "2: st %2, [%3] \n"
166#endif
131 "3: \n" 167 "3: \n"
132 " .section .fixup,\"ax\" \n" 168 " .section .fixup,\"ax\" \n"
133 "4: mov %0, %4 \n" 169 "4: mov %0, %4 \n"
diff --git a/arch/arc/include/asm/mm-arch-hooks.h b/arch/arc/include/asm/mm-arch-hooks.h
deleted file mode 100644
index c37541c5f8ba..000000000000
--- a/arch/arc/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * Architecture specific mm hooks
3 *
4 * Copyright (C) 2015, IBM Corporation
5 * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _ASM_ARC_MM_ARCH_HOOKS_H
13#define _ASM_ARC_MM_ARCH_HOOKS_H
14
15#endif /* _ASM_ARC_MM_ARCH_HOOKS_H */
diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h
index 91755972b9a2..69095da1fcfd 100644
--- a/arch/arc/include/asm/ptrace.h
+++ b/arch/arc/include/asm/ptrace.h
@@ -20,20 +20,20 @@
20struct pt_regs { 20struct pt_regs {
21 21
22 /* Real registers */ 22 /* Real registers */
23 long bta; /* bta_l1, bta_l2, erbta */ 23 unsigned long bta; /* bta_l1, bta_l2, erbta */
24 24
25 long lp_start, lp_end, lp_count; 25 unsigned long lp_start, lp_end, lp_count;
26 26
27 long status32; /* status32_l1, status32_l2, erstatus */ 27 unsigned long status32; /* status32_l1, status32_l2, erstatus */
28 long ret; /* ilink1, ilink2 or eret */ 28 unsigned long ret; /* ilink1, ilink2 or eret */
29 long blink; 29 unsigned long blink;
30 long fp; 30 unsigned long fp;
31 long r26; /* gp */ 31 unsigned long r26; /* gp */
32 32
33 long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0; 33 unsigned long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
34 34
35 long sp; /* user/kernel sp depending on where we came from */ 35 unsigned long sp; /* User/Kernel depending on where we came from */
36 long orig_r0; 36 unsigned long orig_r0;
37 37
38 /* 38 /*
39 * To distinguish bet excp, syscall, irq 39 * To distinguish bet excp, syscall, irq
@@ -55,13 +55,13 @@ struct pt_regs {
55 unsigned long event; 55 unsigned long event;
56 }; 56 };
57 57
58 long user_r25; 58 unsigned long user_r25;
59}; 59};
60#else 60#else
61 61
62struct pt_regs { 62struct pt_regs {
63 63
64 long orig_r0; 64 unsigned long orig_r0;
65 65
66 union { 66 union {
67 struct { 67 struct {
@@ -76,26 +76,26 @@ struct pt_regs {
76 unsigned long event; 76 unsigned long event;
77 }; 77 };
78 78
79 long bta; /* bta_l1, bta_l2, erbta */ 79 unsigned long bta; /* bta_l1, bta_l2, erbta */
80 80
81 long user_r25; 81 unsigned long user_r25;
82 82
83 long r26; /* gp */ 83 unsigned long r26; /* gp */
84 long fp; 84 unsigned long fp;
85 long sp; /* user/kernel sp depending on where we came from */ 85 unsigned long sp; /* user/kernel sp depending on where we came from */
86 86
87 long r12; 87 unsigned long r12;
88 88
89 /*------- Below list auto saved by h/w -----------*/ 89 /*------- Below list auto saved by h/w -----------*/
90 long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11; 90 unsigned long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11;
91 91
92 long blink; 92 unsigned long blink;
93 long lp_end, lp_start, lp_count; 93 unsigned long lp_end, lp_start, lp_count;
94 94
95 long ei, ldi, jli; 95 unsigned long ei, ldi, jli;
96 96
97 long ret; 97 unsigned long ret;
98 long status32; 98 unsigned long status32;
99}; 99};
100 100
101#endif 101#endif
@@ -103,7 +103,7 @@ struct pt_regs {
103/* Callee saved registers - need to be saved only when you are scheduled out */ 103/* Callee saved registers - need to be saved only when you are scheduled out */
104 104
105struct callee_regs { 105struct callee_regs {
106 long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13; 106 unsigned long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13;
107}; 107};
108 108
109#define instruction_pointer(regs) ((regs)->ret) 109#define instruction_pointer(regs) ((regs)->ret)
@@ -142,7 +142,7 @@ struct callee_regs {
142 142
143static inline long regs_return_value(struct pt_regs *regs) 143static inline long regs_return_value(struct pt_regs *regs)
144{ 144{
145 return regs->r0; 145 return (long)regs->r0;
146} 146}
147 147
148#endif /* !__ASSEMBLY__ */ 148#endif /* !__ASSEMBLY__ */
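
The pt_regs conversion from long to unsigned long treats saved registers as raw bit patterns rather than signed quantities, avoiding accidental sign extension and sign-sensitive comparisons when fields are masked or shifted. A small stand-alone example of the difference (right-shifting a negative signed value is implementation-defined and typically sign-extends):

#include <stdio.h>

/* Register images are bit patterns, not signed integers.  With a
 * signed long, a value with the top bit set compares as negative and
 * right shifts typically sign-extend; with unsigned long the same
 * bits behave as a plain field.
 */
int main(void)
{
	long sreg = -1;				/* e.g. a register of all ones */
	unsigned long ureg = (unsigned long)-1;

	printf("signed   > 0: %d\n", sreg > 0);				/* 0 */
	printf("unsigned > 0: %d\n", ureg > 0);				/* 1 */
	printf("signed   >> 4: %#lx\n", (unsigned long)(sreg >> 4));	/* all ones */
	printf("unsigned >> 4: %#lx\n", ureg >> 4);			/* zero-filled */
	return 0;
}
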
diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h
index e1651df6a93d..db8c59d1eaeb 100644
--- a/arch/arc/include/asm/spinlock.h
+++ b/arch/arc/include/asm/spinlock.h
@@ -18,9 +18,518 @@
18#define arch_spin_unlock_wait(x) \ 18#define arch_spin_unlock_wait(x) \
19 do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0) 19 do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0)
20 20
21#ifdef CONFIG_ARC_HAS_LLSC
22
23/*
24 * A normal LLOCK/SCOND based system, w/o need for livelock workaround
25 */
26#ifndef CONFIG_ARC_STAR_9000923308
27
21static inline void arch_spin_lock(arch_spinlock_t *lock) 28static inline void arch_spin_lock(arch_spinlock_t *lock)
22{ 29{
23 unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__; 30 unsigned int val;
31
32 smp_mb();
33
34 __asm__ __volatile__(
35 "1: llock %[val], [%[slock]] \n"
36 " breq %[val], %[LOCKED], 1b \n" /* spin while LOCKED */
37 " scond %[LOCKED], [%[slock]] \n" /* acquire */
38 " bnz 1b \n"
39 " \n"
40 : [val] "=&r" (val)
41 : [slock] "r" (&(lock->slock)),
42 [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
43 : "memory", "cc");
44
45 smp_mb();
46}
47
48/* 1 - lock taken successfully */
49static inline int arch_spin_trylock(arch_spinlock_t *lock)
50{
51 unsigned int val, got_it = 0;
52
53 smp_mb();
54
55 __asm__ __volatile__(
56 "1: llock %[val], [%[slock]] \n"
57 " breq %[val], %[LOCKED], 4f \n" /* already LOCKED, just bail */
58 " scond %[LOCKED], [%[slock]] \n" /* acquire */
59 " bnz 1b \n"
60 " mov %[got_it], 1 \n"
61 "4: \n"
62 " \n"
63 : [val] "=&r" (val),
64 [got_it] "+&r" (got_it)
65 : [slock] "r" (&(lock->slock)),
66 [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
67 : "memory", "cc");
68
69 smp_mb();
70
71 return got_it;
72}
73
74static inline void arch_spin_unlock(arch_spinlock_t *lock)
75{
76 smp_mb();
77
78 lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
79
80 smp_mb();
81}
82
83/*
84 * Read-write spinlocks, allowing multiple readers but only one writer.
85 * Unfair locking as Writers could be starved indefinitely by Reader(s)
86 */
87
88static inline void arch_read_lock(arch_rwlock_t *rw)
89{
90 unsigned int val;
91
92 smp_mb();
93
94 /*
95 * zero means writer holds the lock exclusively, deny Reader.
96 * Otherwise grant lock to first/subseq reader
97 *
98 * if (rw->counter > 0) {
99 * rw->counter--;
100 * ret = 1;
101 * }
102 */
103
104 __asm__ __volatile__(
105 "1: llock %[val], [%[rwlock]] \n"
106 " brls %[val], %[WR_LOCKED], 1b\n" /* <= 0: spin while write locked */
107 " sub %[val], %[val], 1 \n" /* reader lock */
108 " scond %[val], [%[rwlock]] \n"
109 " bnz 1b \n"
110 " \n"
111 : [val] "=&r" (val)
112 : [rwlock] "r" (&(rw->counter)),
113 [WR_LOCKED] "ir" (0)
114 : "memory", "cc");
115
116 smp_mb();
117}
118
119/* 1 - lock taken successfully */
120static inline int arch_read_trylock(arch_rwlock_t *rw)
121{
122 unsigned int val, got_it = 0;
123
124 smp_mb();
125
126 __asm__ __volatile__(
127 "1: llock %[val], [%[rwlock]] \n"
128 " brls %[val], %[WR_LOCKED], 4f\n" /* <= 0: already write locked, bail */
129 " sub %[val], %[val], 1 \n" /* counter-- */
130 " scond %[val], [%[rwlock]] \n"
131 " bnz 1b \n" /* retry if collided with someone */
132 " mov %[got_it], 1 \n"
133 " \n"
134 "4: ; --- done --- \n"
135
136 : [val] "=&r" (val),
137 [got_it] "+&r" (got_it)
138 : [rwlock] "r" (&(rw->counter)),
139 [WR_LOCKED] "ir" (0)
140 : "memory", "cc");
141
142 smp_mb();
143
144 return got_it;
145}
146
147static inline void arch_write_lock(arch_rwlock_t *rw)
148{
149 unsigned int val;
150
151 smp_mb();
152
153 /*
154 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
155 * deny writer. Otherwise if unlocked grant to writer
156 * Hence the claim that Linux rwlocks are unfair to writers.
157 * (can be starved for an indefinite time by readers).
158 *
159 * if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
160 * rw->counter = 0;
161 * ret = 1;
162 * }
163 */
164
165 __asm__ __volatile__(
166 "1: llock %[val], [%[rwlock]] \n"
167 " brne %[val], %[UNLOCKED], 1b \n" /* while !UNLOCKED spin */
168 " mov %[val], %[WR_LOCKED] \n"
169 " scond %[val], [%[rwlock]] \n"
170 " bnz 1b \n"
171 " \n"
172 : [val] "=&r" (val)
173 : [rwlock] "r" (&(rw->counter)),
174 [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
175 [WR_LOCKED] "ir" (0)
176 : "memory", "cc");
177
178 smp_mb();
179}
180
181/* 1 - lock taken successfully */
182static inline int arch_write_trylock(arch_rwlock_t *rw)
183{
184 unsigned int val, got_it = 0;
185
186 smp_mb();
187
188 __asm__ __volatile__(
189 "1: llock %[val], [%[rwlock]] \n"
190 " brne %[val], %[UNLOCKED], 4f \n" /* !UNLOCKED, bail */
191 " mov %[val], %[WR_LOCKED] \n"
192 " scond %[val], [%[rwlock]] \n"
193 " bnz 1b \n" /* retry if collided with someone */
194 " mov %[got_it], 1 \n"
195 " \n"
196 "4: ; --- done --- \n"
197
198 : [val] "=&r" (val),
199 [got_it] "+&r" (got_it)
200 : [rwlock] "r" (&(rw->counter)),
201 [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
202 [WR_LOCKED] "ir" (0)
203 : "memory", "cc");
204
205 smp_mb();
206
207 return got_it;
208}
209
210static inline void arch_read_unlock(arch_rwlock_t *rw)
211{
212 unsigned int val;
213
214 smp_mb();
215
216 /*
217 * rw->counter++;
218 */
219 __asm__ __volatile__(
220 "1: llock %[val], [%[rwlock]] \n"
221 " add %[val], %[val], 1 \n"
222 " scond %[val], [%[rwlock]] \n"
223 " bnz 1b \n"
224 " \n"
225 : [val] "=&r" (val)
226 : [rwlock] "r" (&(rw->counter))
227 : "memory", "cc");
228
229 smp_mb();
230}
231
232static inline void arch_write_unlock(arch_rwlock_t *rw)
233{
234 smp_mb();
235
236 rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
237
238 smp_mb();
239}
240
241#else /* CONFIG_ARC_STAR_9000923308 */
242
243/*
244 * HS38x4 could get into a LLOCK/SCOND livelock in case of multiple overlapping
245 * coherency transactions in the SCU. The exclusive line state keeps rotating
246 * among contenting cores leading to a never ending cycle. So break the cycle
247 * by deferring the retry of failed exclusive access (SCOND). The actual delay
248 * needed is function of number of contending cores as well as the unrelated
249 * coherency traffic from other cores. To keep the code simple, start off with
250 * small delay of 1 which would suffice most cases and in case of contention
251 * double the delay. Eventually the delay is sufficient such that the coherency
252 * pipeline is drained, thus a subsequent exclusive access would succeed.
253 */
254
255#define SCOND_FAIL_RETRY_VAR_DEF \
256 unsigned int delay, tmp; \
257
258#define SCOND_FAIL_RETRY_ASM \
259 " ; --- scond fail delay --- \n" \
260 " mov %[tmp], %[delay] \n" /* tmp = delay */ \
261 "2: brne.d %[tmp], 0, 2b \n" /* while (tmp != 0) */ \
262 " sub %[tmp], %[tmp], 1 \n" /* tmp-- */ \
263 " rol %[delay], %[delay] \n" /* delay *= 2 */ \
264 " b 1b \n" /* start over */ \
265 " \n" \
266 "4: ; --- done --- \n" \
267
268#define SCOND_FAIL_RETRY_VARS \
269 ,[delay] "=&r" (delay), [tmp] "=&r" (tmp) \
270
271static inline void arch_spin_lock(arch_spinlock_t *lock)
272{
273 unsigned int val;
274 SCOND_FAIL_RETRY_VAR_DEF;
275
276 smp_mb();
277
278 __asm__ __volatile__(
279 "0: mov %[delay], 1 \n"
280 "1: llock %[val], [%[slock]] \n"
281 " breq %[val], %[LOCKED], 0b \n" /* spin while LOCKED */
282 " scond %[LOCKED], [%[slock]] \n" /* acquire */
283 " bz 4f \n" /* done */
284 " \n"
285 SCOND_FAIL_RETRY_ASM
286
287 : [val] "=&r" (val)
288 SCOND_FAIL_RETRY_VARS
289 : [slock] "r" (&(lock->slock)),
290 [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
291 : "memory", "cc");
292
293 smp_mb();
294}
295
296/* 1 - lock taken successfully */
297static inline int arch_spin_trylock(arch_spinlock_t *lock)
298{
299 unsigned int val, got_it = 0;
300 SCOND_FAIL_RETRY_VAR_DEF;
301
302 smp_mb();
303
304 __asm__ __volatile__(
305 "0: mov %[delay], 1 \n"
306 "1: llock %[val], [%[slock]] \n"
307 " breq %[val], %[LOCKED], 4f \n" /* already LOCKED, just bail */
308 " scond %[LOCKED], [%[slock]] \n" /* acquire */
309 " bz.d 4f \n"
310 " mov.z %[got_it], 1 \n" /* got it */
311 " \n"
312 SCOND_FAIL_RETRY_ASM
313
314 : [val] "=&r" (val),
315 [got_it] "+&r" (got_it)
316 SCOND_FAIL_RETRY_VARS
317 : [slock] "r" (&(lock->slock)),
318 [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
319 : "memory", "cc");
320
321 smp_mb();
322
323 return got_it;
324}
325
326static inline void arch_spin_unlock(arch_spinlock_t *lock)
327{
328 smp_mb();
329
330 lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
331
332 smp_mb();
333}
334
335/*
336 * Read-write spinlocks, allowing multiple readers but only one writer.
337 * Unfair locking as Writers could be starved indefinitely by Reader(s)
338 */
339
340static inline void arch_read_lock(arch_rwlock_t *rw)
341{
342 unsigned int val;
343 SCOND_FAIL_RETRY_VAR_DEF;
344
345 smp_mb();
346
347 /*
348 * zero means writer holds the lock exclusively, deny Reader.
349 * Otherwise grant lock to first/subseq reader
350 *
351 * if (rw->counter > 0) {
352 * rw->counter--;
353 * ret = 1;
354 * }
355 */
356
357 __asm__ __volatile__(
358 "0: mov %[delay], 1 \n"
359 "1: llock %[val], [%[rwlock]] \n"
360 " brls %[val], %[WR_LOCKED], 0b\n" /* <= 0: spin while write locked */
361 " sub %[val], %[val], 1 \n" /* reader lock */
362 " scond %[val], [%[rwlock]] \n"
363 " bz 4f \n" /* done */
364 " \n"
365 SCOND_FAIL_RETRY_ASM
366
367 : [val] "=&r" (val)
368 SCOND_FAIL_RETRY_VARS
369 : [rwlock] "r" (&(rw->counter)),
370 [WR_LOCKED] "ir" (0)
371 : "memory", "cc");
372
373 smp_mb();
374}
375
376/* 1 - lock taken successfully */
377static inline int arch_read_trylock(arch_rwlock_t *rw)
378{
379 unsigned int val, got_it = 0;
380 SCOND_FAIL_RETRY_VAR_DEF;
381
382 smp_mb();
383
384 __asm__ __volatile__(
385 "0: mov %[delay], 1 \n"
386 "1: llock %[val], [%[rwlock]] \n"
387 " brls %[val], %[WR_LOCKED], 4f\n" /* <= 0: already write locked, bail */
388 " sub %[val], %[val], 1 \n" /* counter-- */
389 " scond %[val], [%[rwlock]] \n"
390 " bz.d 4f \n"
391 " mov.z %[got_it], 1 \n" /* got it */
392 " \n"
393 SCOND_FAIL_RETRY_ASM
394
395 : [val] "=&r" (val),
396 [got_it] "+&r" (got_it)
397 SCOND_FAIL_RETRY_VARS
398 : [rwlock] "r" (&(rw->counter)),
399 [WR_LOCKED] "ir" (0)
400 : "memory", "cc");
401
402 smp_mb();
403
404 return got_it;
405}
406
407static inline void arch_write_lock(arch_rwlock_t *rw)
408{
409 unsigned int val;
410 SCOND_FAIL_RETRY_VAR_DEF;
411
412 smp_mb();
413
414 /*
415 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
416 * deny writer. Otherwise if unlocked grant to writer
417 * Hence the claim that Linux rwlocks are unfair to writers.
418 * (can be starved for an indefinite time by readers).
419 *
420 * if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
421 * rw->counter = 0;
422 * ret = 1;
423 * }
424 */
425
426 __asm__ __volatile__(
427 "0: mov %[delay], 1 \n"
428 "1: llock %[val], [%[rwlock]] \n"
429 " brne %[val], %[UNLOCKED], 0b \n" /* while !UNLOCKED spin */
430 " mov %[val], %[WR_LOCKED] \n"
431 " scond %[val], [%[rwlock]] \n"
432 " bz 4f \n"
433 " \n"
434 SCOND_FAIL_RETRY_ASM
435
436 : [val] "=&r" (val)
437 SCOND_FAIL_RETRY_VARS
438 : [rwlock] "r" (&(rw->counter)),
439 [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
440 [WR_LOCKED] "ir" (0)
441 : "memory", "cc");
442
443 smp_mb();
444}
445
446/* 1 - lock taken successfully */
447static inline int arch_write_trylock(arch_rwlock_t *rw)
448{
449 unsigned int val, got_it = 0;
450 SCOND_FAIL_RETRY_VAR_DEF;
451
452 smp_mb();
453
454 __asm__ __volatile__(
455 "0: mov %[delay], 1 \n"
456 "1: llock %[val], [%[rwlock]] \n"
457 " brne %[val], %[UNLOCKED], 4f \n" /* !UNLOCKED, bail */
458 " mov %[val], %[WR_LOCKED] \n"
459 " scond %[val], [%[rwlock]] \n"
460 " bz.d 4f \n"
461 " mov.z %[got_it], 1 \n" /* got it */
462 " \n"
463 SCOND_FAIL_RETRY_ASM
464
465 : [val] "=&r" (val),
466 [got_it] "+&r" (got_it)
467 SCOND_FAIL_RETRY_VARS
468 : [rwlock] "r" (&(rw->counter)),
469 [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
470 [WR_LOCKED] "ir" (0)
471 : "memory", "cc");
472
473 smp_mb();
474
475 return got_it;
476}
477
478static inline void arch_read_unlock(arch_rwlock_t *rw)
479{
480 unsigned int val;
481
482 smp_mb();
483
484 /*
485 * rw->counter++;
486 */
487 __asm__ __volatile__(
488 "1: llock %[val], [%[rwlock]] \n"
489 " add %[val], %[val], 1 \n"
490 " scond %[val], [%[rwlock]] \n"
491 " bnz 1b \n"
492 " \n"
493 : [val] "=&r" (val)
494 : [rwlock] "r" (&(rw->counter))
495 : "memory", "cc");
496
497 smp_mb();
498}
499
500static inline void arch_write_unlock(arch_rwlock_t *rw)
501{
502 unsigned int val;
503
504 smp_mb();
505
506 /*
507 * rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
508 */
509 __asm__ __volatile__(
510 "1: llock %[val], [%[rwlock]] \n"
511 " scond %[UNLOCKED], [%[rwlock]]\n"
512 " bnz 1b \n"
513 " \n"
514 : [val] "=&r" (val)
515 : [rwlock] "r" (&(rw->counter)),
516 [UNLOCKED] "r" (__ARCH_RW_LOCK_UNLOCKED__)
517 : "memory", "cc");
518
519 smp_mb();
520}
521
522#undef SCOND_FAIL_RETRY_VAR_DEF
523#undef SCOND_FAIL_RETRY_ASM
524#undef SCOND_FAIL_RETRY_VARS
525
526#endif /* CONFIG_ARC_STAR_9000923308 */
527
528#else /* !CONFIG_ARC_HAS_LLSC */
529
530static inline void arch_spin_lock(arch_spinlock_t *lock)
531{
532 unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;
24 533
25 /* 534 /*
26 * This smp_mb() is technically superfluous, we only need the one 535 * This smp_mb() is technically superfluous, we only need the one
@@ -33,7 +542,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
33 __asm__ __volatile__( 542 __asm__ __volatile__(
34 "1: ex %0, [%1] \n" 543 "1: ex %0, [%1] \n"
35 " breq %0, %2, 1b \n" 544 " breq %0, %2, 1b \n"
36 : "+&r" (tmp) 545 : "+&r" (val)
37 : "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__) 546 : "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
38 : "memory"); 547 : "memory");
39 548
@@ -48,26 +557,27 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
48 smp_mb(); 557 smp_mb();
49} 558}
50 559
560/* 1 - lock taken successfully */
51static inline int arch_spin_trylock(arch_spinlock_t *lock) 561static inline int arch_spin_trylock(arch_spinlock_t *lock)
52{ 562{
53 unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__; 563 unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;
54 564
55 smp_mb(); 565 smp_mb();
56 566
57 __asm__ __volatile__( 567 __asm__ __volatile__(
58 "1: ex %0, [%1] \n" 568 "1: ex %0, [%1] \n"
59 : "+r" (tmp) 569 : "+r" (val)
60 : "r"(&(lock->slock)) 570 : "r"(&(lock->slock))
61 : "memory"); 571 : "memory");
62 572
63 smp_mb(); 573 smp_mb();
64 574
65 return (tmp == __ARCH_SPIN_LOCK_UNLOCKED__); 575 return (val == __ARCH_SPIN_LOCK_UNLOCKED__);
66} 576}
67 577
68static inline void arch_spin_unlock(arch_spinlock_t *lock) 578static inline void arch_spin_unlock(arch_spinlock_t *lock)
69{ 579{
70 unsigned int tmp = __ARCH_SPIN_LOCK_UNLOCKED__; 580 unsigned int val = __ARCH_SPIN_LOCK_UNLOCKED__;
71 581
72 /* 582 /*
73 * RELEASE barrier: given the instructions avail on ARCv2, full barrier 583 * RELEASE barrier: given the instructions avail on ARCv2, full barrier
@@ -77,7 +587,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
77 587
78 __asm__ __volatile__( 588 __asm__ __volatile__(
79 " ex %0, [%1] \n" 589 " ex %0, [%1] \n"
80 : "+r" (tmp) 590 : "+r" (val)
81 : "r"(&(lock->slock)) 591 : "r"(&(lock->slock))
82 : "memory"); 592 : "memory");
83 593
@@ -90,19 +600,12 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
90 600
91/* 601/*
92 * Read-write spinlocks, allowing multiple readers but only one writer. 602 * Read-write spinlocks, allowing multiple readers but only one writer.
603 * Unfair locking as Writers could be starved indefinitely by Reader(s)
93 * 604 *
94 * The spinlock itself is contained in @counter and access to it is 605 * The spinlock itself is contained in @counter and access to it is
95 * serialized with @lock_mutex. 606 * serialized with @lock_mutex.
96 *
97 * Unfair locking as Writers could be starved indefinitely by Reader(s)
98 */ 607 */
99 608
100/* Would read_trylock() succeed? */
101#define arch_read_can_lock(x) ((x)->counter > 0)
102
103/* Would write_trylock() succeed? */
104#define arch_write_can_lock(x) ((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)
105
106/* 1 - lock taken successfully */ 609/* 1 - lock taken successfully */
107static inline int arch_read_trylock(arch_rwlock_t *rw) 610static inline int arch_read_trylock(arch_rwlock_t *rw)
108{ 611{
@@ -173,6 +676,11 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
173 arch_spin_unlock(&(rw->lock_mutex)); 676 arch_spin_unlock(&(rw->lock_mutex));
174} 677}
175 678
679#endif
680
681#define arch_read_can_lock(x) ((x)->counter > 0)
682#define arch_write_can_lock(x) ((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)
683
176#define arch_read_lock_flags(lock, flags) arch_read_lock(lock) 684#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
177#define arch_write_lock_flags(lock, flags) arch_write_lock(lock) 685#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
178 686
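
The ARC_STAR_9000923308 variants above break the SCOND livelock by waiting an exponentially growing delay after each failed exclusive store before retrying. Below is a user-space sketch of that shape: a test-and-set spinlock with exponential backoff built on the GCC __atomic builtins. The names, the delay cap and the iteration-based delay are illustrative only; the kernel code counts the delay down in assembly (brne.d) and doubles it with rol.

#include <stdio.h>
#include <pthread.h>

/* Test-and-set spinlock with exponential backoff after each failed
 * acquire attempt, so contenders stop hammering the same cache line
 * in lockstep.  Build with: gcc -O2 -pthread
 */
typedef struct {
	volatile unsigned int slock;
} demo_spinlock_t;

static void demo_spin_lock(demo_spinlock_t *lock)
{
	unsigned int delay = 1;

	while (__atomic_exchange_n(&lock->slock, 1, __ATOMIC_ACQUIRE)) {
		/* back off before retrying */
		for (volatile unsigned int i = 0; i < delay; i++)
			;
		if (delay < (1u << 16))
			delay <<= 1;		/* delay *= 2 */
	}
}

static void demo_spin_unlock(demo_spinlock_t *lock)
{
	__atomic_store_n(&lock->slock, 0, __ATOMIC_RELEASE);
}

static demo_spinlock_t lock;
static int counter;

static void *worker(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++) {
		demo_spin_lock(&lock);
		counter++;
		demo_spin_unlock(&lock);
	}
	return NULL;
}

int main(void)
{
	pthread_t t[2];

	for (int i = 0; i < 2; i++)
		pthread_create(&t[i], NULL, worker, NULL);
	for (int i = 0; i < 2; i++)
		pthread_join(t[i], NULL);

	printf("counter = %d\n", counter);	/* 200000 */
	return 0;
}
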
diff --git a/arch/arc/include/asm/spinlock_types.h b/arch/arc/include/asm/spinlock_types.h
index 662627ced4f2..4e1ef5f650c6 100644
--- a/arch/arc/include/asm/spinlock_types.h
+++ b/arch/arc/include/asm/spinlock_types.h
@@ -26,7 +26,9 @@ typedef struct {
26 */ 26 */
27typedef struct { 27typedef struct {
28 volatile unsigned int counter; 28 volatile unsigned int counter;
29#ifndef CONFIG_ARC_HAS_LLSC
29 arch_spinlock_t lock_mutex; 30 arch_spinlock_t lock_mutex;
31#endif
30} arch_rwlock_t; 32} arch_rwlock_t;
31 33
32#define __ARCH_RW_LOCK_UNLOCKED__ 0x01000000 34#define __ARCH_RW_LOCK_UNLOCKED__ 0x01000000
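
The rwlock counter convention behind arch_rwlock_t (and the comments in the spinlock hunks above) is: the counter starts at __ARCH_RW_LOCK_UNLOCKED__, each reader decrements it, and a writer takes it straight to zero, so zero means write-locked and the initial value means fully unlocked. A non-atomic sketch of just those state checks; the real code performs them under LLOCK/SCOND or, without LLSC, under the lock_mutex that this hunk makes conditional.

#include <stdio.h>

/* Reader/writer counter convention used by arch_rwlock_t: start at
 * __ARCH_RW_LOCK_UNLOCKED__, decrement per reader, drop to zero for a
 * writer.  Non-atomic illustration of the state checks only.
 */
#define RW_UNLOCKED 0x01000000u

static unsigned int counter = RW_UNLOCKED;

static int read_trylock_demo(void)
{
	if (counter > 0) {		/* zero means a writer holds it */
		counter--;
		return 1;
	}
	return 0;
}

static void read_unlock_demo(void)
{
	counter++;
}

static int write_trylock_demo(void)
{
	if (counter == RW_UNLOCKED) {	/* no readers, no writer */
		counter = 0;
		return 1;
	}
	return 0;
}

int main(void)
{
	printf("reader: %d\n", read_trylock_demo());	/* 1 */
	printf("writer: %d\n", write_trylock_demo());	/* 0, a reader is in */
	read_unlock_demo();
	printf("writer: %d\n", write_trylock_demo());	/* 1 */
	return 0;
}
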
diff --git a/arch/arc/include/uapi/asm/ptrace.h b/arch/arc/include/uapi/asm/ptrace.h
index 76a7739aab1c..0b3ef63d4a03 100644
--- a/arch/arc/include/uapi/asm/ptrace.h
+++ b/arch/arc/include/uapi/asm/ptrace.h
@@ -32,20 +32,20 @@
32*/ 32*/
33struct user_regs_struct { 33struct user_regs_struct {
34 34
35 long pad; 35 unsigned long pad;
36 struct { 36 struct {
37 long bta, lp_start, lp_end, lp_count; 37 unsigned long bta, lp_start, lp_end, lp_count;
38 long status32, ret, blink, fp, gp; 38 unsigned long status32, ret, blink, fp, gp;
39 long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0; 39 unsigned long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
40 long sp; 40 unsigned long sp;
41 } scratch; 41 } scratch;
42 long pad2; 42 unsigned long pad2;
43 struct { 43 struct {
44 long r25, r24, r23, r22, r21, r20; 44 unsigned long r25, r24, r23, r22, r21, r20;
45 long r19, r18, r17, r16, r15, r14, r13; 45 unsigned long r19, r18, r17, r16, r15, r14, r13;
46 } callee; 46 } callee;
47 long efa; /* break pt addr, for break points in delay slots */ 47 unsigned long efa; /* break pt addr, for break points in delay slots */
48 long stop_pc; /* give dbg stop_pc after ensuring brkpt trap */ 48 unsigned long stop_pc; /* give dbg stop_pc after ensuring brkpt trap */
49}; 49};
50#endif /* !__ASSEMBLY__ */ 50#endif /* !__ASSEMBLY__ */
51 51
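The switch from long to unsigned long in user_regs_struct matters because register and fault-address values routinely have bit 31 set; with a signed 32-bit type, comparisons in a debugger or core-dump consumer silently go wrong. A tiny stand-alone illustration (the values and the task_size figure are made up, and int32_t/uint32_t stand in for a 32-bit long):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		int32_t  s_efa = (int32_t)0x80000000u;	/* what a signed 32-bit long would hold */
		uint32_t u_efa = 0x80000000u;
		uint32_t task_size = 0x60000000u;

		/* signed compare wrongly claims the fault address is below task_size */
		printf("signed:   %d\n", s_efa < (int32_t)task_size);	/* prints 1 */
		printf("unsigned: %d\n", u_efa < task_size);		/* prints 0 */
		return 0;
	}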
diff --git a/arch/arc/kernel/intc-arcv2.c b/arch/arc/kernel/intc-arcv2.c
index 6208c630abed..26c156827479 100644
--- a/arch/arc/kernel/intc-arcv2.c
+++ b/arch/arc/kernel/intc-arcv2.c
@@ -12,7 +12,6 @@
12#include <linux/of.h> 12#include <linux/of.h>
13#include <linux/irqdomain.h> 13#include <linux/irqdomain.h>
14#include <linux/irqchip.h> 14#include <linux/irqchip.h>
15#include "../../drivers/irqchip/irqchip.h"
16#include <asm/irq.h> 15#include <asm/irq.h>
17 16
18/* 17/*
diff --git a/arch/arc/kernel/intc-compact.c b/arch/arc/kernel/intc-compact.c
index fcdddb631766..039fac30b5c1 100644
--- a/arch/arc/kernel/intc-compact.c
+++ b/arch/arc/kernel/intc-compact.c
@@ -12,7 +12,6 @@
12#include <linux/of.h> 12#include <linux/of.h>
13#include <linux/irqdomain.h> 13#include <linux/irqdomain.h>
14#include <linux/irqchip.h> 14#include <linux/irqchip.h>
15#include "../../drivers/irqchip/irqchip.h"
16#include <asm/irq.h> 15#include <asm/irq.h>
17 16
18/* 17/*
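Both interrupt controllers previously reached into drivers/irqchip/ for its private header; presumably the only thing they needed from it, IRQCHIP_DECLARE(), is available from <linux/irqchip.h>, so the relative include can go. A skeleton of how such a controller is declared (all names below are placeholders, not from the patch):

	#include <linux/of.h>
	#include <linux/irqchip.h>

	static int __init sketch_intc_init(struct device_node *node,
					   struct device_node *parent)
	{
		/* map registers, register an irq_domain, ... */
		return 0;
	}
	IRQCHIP_DECLARE(sketch_intc, "vendor,sketch-intc", sketch_intc_init);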
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
index 30284e8de6ff..2fb86589054d 100644
--- a/arch/arc/kernel/mcip.c
+++ b/arch/arc/kernel/mcip.c
@@ -175,7 +175,6 @@ void mcip_init_early_smp(void)
175#include <linux/irqchip.h> 175#include <linux/irqchip.h>
176#include <linux/of.h> 176#include <linux/of.h>
177#include <linux/of_irq.h> 177#include <linux/of_irq.h>
178#include "../../drivers/irqchip/irqchip.h"
179 178
180/* 179/*
181 * Set the DEST for @cmn_irq to @cpu_mask (1 bit per core) 180 * Set the DEST for @cmn_irq to @cpu_mask (1 bit per core)
@@ -218,11 +217,28 @@ static void idu_irq_unmask(struct irq_data *data)
218 raw_spin_unlock_irqrestore(&mcip_lock, flags); 217 raw_spin_unlock_irqrestore(&mcip_lock, flags);
219} 218}
220 219
220#ifdef CONFIG_SMP
221static int 221static int
222idu_irq_set_affinity(struct irq_data *d, const struct cpumask *cpumask, bool f) 222idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
223 bool force)
223{ 224{
225 unsigned long flags;
226 cpumask_t online;
227
 228	 /* error out if @cpumask contains no online CPU */
229 if (!cpumask_and(&online, cpumask, cpu_online_mask))
230 return -EINVAL;
231
232 raw_spin_lock_irqsave(&mcip_lock, flags);
233
234 idu_set_dest(data->hwirq, cpumask_bits(&online)[0]);
235 idu_set_mode(data->hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_RR);
236
237 raw_spin_unlock_irqrestore(&mcip_lock, flags);
238
224 return IRQ_SET_MASK_OK; 239 return IRQ_SET_MASK_OK;
225} 240}
241#endif
226 242
227static struct irq_chip idu_irq_chip = { 243static struct irq_chip idu_irq_chip = {
228 .name = "MCIP IDU Intc", 244 .name = "MCIP IDU Intc",
@@ -330,8 +346,7 @@ idu_of_init(struct device_node *intc, struct device_node *parent)
330 if (!i) 346 if (!i)
331 idu_first_irq = irq; 347 idu_first_irq = irq;
332 348
333 irq_set_handler_data(irq, domain); 349 irq_set_chained_handler_and_data(irq, idu_cascade_isr, domain);
334 irq_set_chained_handler(irq, idu_cascade_isr);
335 } 350 }
336 351
337 __mcip_cmd(CMD_IDU_ENABLE, 0); 352 __mcip_cmd(CMD_IDU_ENABLE, 0);
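With idu_irq_set_affinity() filled in, the usual user-space affinity knob now has an effect on IDU-routed interrupts, and masks that contain no online CPU are rejected with -EINVAL. A throwaway usage example (the IRQ number 24 and the mask are hypothetical):

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/proc/irq/24/smp_affinity", "w");

		if (!f)
			return 1;
		fprintf(f, "3\n");	/* bitmask: cpu0 | cpu1 */
		return fclose(f) ? 1 : 0;
	}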
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index a3d186211ed3..cabde9dc0696 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -47,6 +47,7 @@ static void read_arc_build_cfg_regs(void)
47 struct bcr_perip uncached_space; 47 struct bcr_perip uncached_space;
48 struct bcr_generic bcr; 48 struct bcr_generic bcr;
49 struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()]; 49 struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
50 unsigned long perip_space;
50 FIX_PTR(cpu); 51 FIX_PTR(cpu);
51 52
52 READ_BCR(AUX_IDENTITY, cpu->core); 53 READ_BCR(AUX_IDENTITY, cpu->core);
@@ -56,7 +57,12 @@ static void read_arc_build_cfg_regs(void)
56 cpu->vec_base = read_aux_reg(AUX_INTR_VEC_BASE); 57 cpu->vec_base = read_aux_reg(AUX_INTR_VEC_BASE);
57 58
58 READ_BCR(ARC_REG_D_UNCACH_BCR, uncached_space); 59 READ_BCR(ARC_REG_D_UNCACH_BCR, uncached_space);
59 BUG_ON((uncached_space.start << 24) != ARC_UNCACHED_ADDR_SPACE); 60 if (uncached_space.ver < 3)
61 perip_space = uncached_space.start << 24;
62 else
63 perip_space = read_aux_reg(AUX_NON_VOL) & 0xF0000000;
64
65 BUG_ON(perip_space != ARC_UNCACHED_ADDR_SPACE);
60 66
61 READ_BCR(ARC_REG_MUL_BCR, cpu->extn_mpy); 67 READ_BCR(ARC_REG_MUL_BCR, cpu->extn_mpy);
62 68
@@ -142,17 +148,22 @@ static void read_arc_build_cfg_regs(void)
142} 148}
143 149
144static const struct cpuinfo_data arc_cpu_tbl[] = { 150static const struct cpuinfo_data arc_cpu_tbl[] = {
151#ifdef CONFIG_ISA_ARCOMPACT
145 { {0x20, "ARC 600" }, 0x2F}, 152 { {0x20, "ARC 600" }, 0x2F},
146 { {0x30, "ARC 700" }, 0x33}, 153 { {0x30, "ARC 700" }, 0x33},
147 { {0x34, "ARC 700 R4.10"}, 0x34}, 154 { {0x34, "ARC 700 R4.10"}, 0x34},
148 { {0x35, "ARC 700 R4.11"}, 0x35}, 155 { {0x35, "ARC 700 R4.11"}, 0x35},
149 { {0x50, "ARC HS38" }, 0x51}, 156#else
157 { {0x50, "ARC HS38 R2.0"}, 0x51},
158 { {0x52, "ARC HS38 R2.1"}, 0x52},
159#endif
150 { {0x00, NULL } } 160 { {0x00, NULL } }
151}; 161};
152 162
153#define IS_AVAIL1(v, str) ((v) ? str : "") 163#define IS_AVAIL1(v, s) ((v) ? s : "")
154#define IS_USED(cfg) (IS_ENABLED(cfg) ? "" : "(not used) ") 164#define IS_USED_RUN(v) ((v) ? "" : "(not used) ")
155#define IS_AVAIL2(v, str, cfg) IS_AVAIL1(v, str), IS_AVAIL1(v, IS_USED(cfg)) 165#define IS_USED_CFG(cfg) IS_USED_RUN(IS_ENABLED(cfg))
166#define IS_AVAIL2(v, s, cfg) IS_AVAIL1(v, s), IS_AVAIL1(v, IS_USED_CFG(cfg))
156 167
157static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len) 168static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
158{ 169{
@@ -226,7 +237,7 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
226 n += scnprintf(buf + n, len - n, "mpy[opt %d] ", opt); 237 n += scnprintf(buf + n, len - n, "mpy[opt %d] ", opt);
227 } 238 }
228 n += scnprintf(buf + n, len - n, "%s", 239 n += scnprintf(buf + n, len - n, "%s",
229 IS_USED(CONFIG_ARC_HAS_HW_MPY)); 240 IS_USED_CFG(CONFIG_ARC_HAS_HW_MPY));
230 } 241 }
231 242
232 n += scnprintf(buf + n, len - n, "%s%s%s%s%s%s%s%s\n", 243 n += scnprintf(buf + n, len - n, "%s%s%s%s%s%s%s%s\n",
@@ -325,6 +336,10 @@ static void arc_chk_core_config(void)
325 pr_warn("CONFIG_ARC_FPU_SAVE_RESTORE needed for working apps\n"); 336 pr_warn("CONFIG_ARC_FPU_SAVE_RESTORE needed for working apps\n");
326 else if (!cpu->extn.fpu_dp && fpu_enabled) 337 else if (!cpu->extn.fpu_dp && fpu_enabled)
327 panic("FPU non-existent, disable CONFIG_ARC_FPU_SAVE_RESTORE\n"); 338 panic("FPU non-existent, disable CONFIG_ARC_FPU_SAVE_RESTORE\n");
339
340 if (is_isa_arcv2() && IS_ENABLED(CONFIG_SMP) && cpu->isa.atomic &&
341 !IS_ENABLED(CONFIG_ARC_STAR_9000923308))
342 panic("llock/scond livelock workaround missing\n");
328} 343}
329 344
330/* 345/*
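The reworked IS_* helpers separate the compile-time test (IS_USED_CFG, built on IS_ENABLED) from the runtime one (IS_USED_RUN), so a capability string can reflect either. A small stand-alone demo of how IS_AVAIL2() expands into two strings; IS_ENABLED() is faked with a plain integer here since there is no Kconfig outside the kernel:

	#include <stdio.h>

	/* demo stand-in: in the kernel IS_ENABLED() tests a Kconfig symbol */
	#define IS_ENABLED(cfg)		(cfg)

	#define IS_AVAIL1(v, s)		((v) ? s : "")
	#define IS_USED_RUN(v)		((v) ? "" : "(not used) ")
	#define IS_USED_CFG(cfg)	IS_USED_RUN(IS_ENABLED(cfg))
	#define IS_AVAIL2(v, s, cfg)	IS_AVAIL1(v, s), IS_AVAIL1(v, IS_USED_CFG(cfg))

	int main(void)
	{
		int hw_has_ll64 = 1;	/* hardware reports the feature...      */
		int cfg_ll64    = 0;	/* ...but the kernel config left it off */

		/* IS_AVAIL2() yields two strings, hence the two %s */
		printf("ISA extn: %s%s\n", IS_AVAIL2(hw_has_ll64, "ll64 ", cfg_ll64));
		return 0;		/* prints: ISA extn: ll64 (not used)  */
	}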
diff --git a/arch/arc/kernel/time.c b/arch/arc/kernel/time.c
index 3364d2bbc515..4294761a2b3e 100644
--- a/arch/arc/kernel/time.c
+++ b/arch/arc/kernel/time.c
@@ -203,34 +203,24 @@ static int arc_clkevent_set_next_event(unsigned long delta,
203 return 0; 203 return 0;
204} 204}
205 205
206static void arc_clkevent_set_mode(enum clock_event_mode mode, 206static int arc_clkevent_set_periodic(struct clock_event_device *dev)
207 struct clock_event_device *dev)
208{ 207{
209 switch (mode) { 208 /*
210 case CLOCK_EVT_MODE_PERIODIC: 209 * At X Hz, 1 sec = 1000ms -> X cycles;
211 /* 210 * 10ms -> X / 100 cycles
212 * At X Hz, 1 sec = 1000ms -> X cycles; 211 */
213 * 10ms -> X / 100 cycles 212 arc_timer_event_setup(arc_get_core_freq() / HZ);
214 */ 213 return 0;
215 arc_timer_event_setup(arc_get_core_freq() / HZ);
216 break;
217 case CLOCK_EVT_MODE_ONESHOT:
218 break;
219 default:
220 break;
221 }
222
223 return;
224} 214}
225 215
226static DEFINE_PER_CPU(struct clock_event_device, arc_clockevent_device) = { 216static DEFINE_PER_CPU(struct clock_event_device, arc_clockevent_device) = {
227 .name = "ARC Timer0", 217 .name = "ARC Timer0",
228 .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC, 218 .features = CLOCK_EVT_FEAT_ONESHOT |
229 .mode = CLOCK_EVT_MODE_UNUSED, 219 CLOCK_EVT_FEAT_PERIODIC,
230 .rating = 300, 220 .rating = 300,
231 .irq = TIMER0_IRQ, /* hardwired, no need for resources */ 221 .irq = TIMER0_IRQ, /* hardwired, no need for resources */
232 .set_next_event = arc_clkevent_set_next_event, 222 .set_next_event = arc_clkevent_set_next_event,
233 .set_mode = arc_clkevent_set_mode, 223 .set_state_periodic = arc_clkevent_set_periodic,
234}; 224};
235 225
236static irqreturn_t timer_irq_handler(int irq, void *dev_id) 226static irqreturn_t timer_irq_handler(int irq, void *dev_id)
@@ -240,7 +230,7 @@ static irqreturn_t timer_irq_handler(int irq, void *dev_id)
240 * irq_set_chip_and_handler() asked for handle_percpu_devid_irq() 230 * irq_set_chip_and_handler() asked for handle_percpu_devid_irq()
241 */ 231 */
242 struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device); 232 struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
243 int irq_reenable = evt->mode == CLOCK_EVT_MODE_PERIODIC; 233 int irq_reenable = clockevent_state_periodic(evt);
244 234
245 /* 235 /*
246 * Any write to CTRL reg ACks the interrupt, we rewrite the 236 * Any write to CTRL reg ACks the interrupt, we rewrite the
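time.c is converted from the removed set_mode() callback to per-state callbacks (set_state_periodic) plus clockevent_state_periodic() for querying the current state. A minimal sketch of new-style registration, with a placeholder name and placeholder delta limits rather than the driver's real values:

	#include <linux/clockchips.h>

	static int sketch_set_next_event(unsigned long delta,
					 struct clock_event_device *dev)
	{
		/* program a one-shot expiry 'delta' cycles ahead */
		return 0;
	}

	static int sketch_set_periodic(struct clock_event_device *dev)
	{
		/* program the timer for HZ ticks per second, as the patch does */
		return 0;
	}

	static struct clock_event_device sketch_evt = {
		.name			= "sketch-timer",
		.features		= CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
		.rating			= 300,
		.set_next_event		= sketch_set_next_event,
		.set_state_periodic	= sketch_set_periodic,
	};

	static void __init sketch_register(u32 freq_hz)
	{
		clockevents_config_and_register(&sketch_evt, freq_hz, 16, 0xffffffffUL);
	}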
diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c
index 807f7d61d7a7..a6f91e88ce36 100644
--- a/arch/arc/kernel/troubleshoot.c
+++ b/arch/arc/kernel/troubleshoot.c
@@ -58,7 +58,6 @@ static void show_callee_regs(struct callee_regs *cregs)
58 58
59static void print_task_path_n_nm(struct task_struct *tsk, char *buf) 59static void print_task_path_n_nm(struct task_struct *tsk, char *buf)
60{ 60{
61 struct path path;
62 char *path_nm = NULL; 61 char *path_nm = NULL;
63 struct mm_struct *mm; 62 struct mm_struct *mm;
64 struct file *exe_file; 63 struct file *exe_file;
diff --git a/arch/arc/lib/memcpy-archs.S b/arch/arc/lib/memcpy-archs.S
index 1b2b3acfed52..0cab0b8a57c5 100644
--- a/arch/arc/lib/memcpy-archs.S
+++ b/arch/arc/lib/memcpy-archs.S
@@ -206,7 +206,7 @@ unalignedOffby3:
206 ld.ab r6, [r1, 4] 206 ld.ab r6, [r1, 4]
207 prefetch [r1, 28] ;Prefetch the next read location 207 prefetch [r1, 28] ;Prefetch the next read location
208 ld.ab r8, [r1,4] 208 ld.ab r8, [r1,4]
209 prefetch [r3, 32] ;Prefetch the next write location 209 prefetchw [r3, 32] ;Prefetch the next write location
210 210
211 SHIFT_1 (r7, r6, 8) 211 SHIFT_1 (r7, r6, 8)
212 or r7, r7, r5 212 or r7, r7, r5
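The memcpy fix changes the write-side hint from prefetch to prefetchw, i.e. it tells the cache the line is about to be written rather than read. The same read/write distinction is expressed in portable C through the second argument of __builtin_prefetch(); a small sketch:

	static void copy_words(unsigned long *dst, const unsigned long *src, int n)
	{
		for (int i = 0; i < n; i++) {
			__builtin_prefetch(src + i + 8, 0);	/* read stream  */
			__builtin_prefetch(dst + i + 8, 1);	/* write stream */
			dst[i] = src[i];
		}
	}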
diff --git a/arch/arc/lib/memset-archs.S b/arch/arc/lib/memset-archs.S
index 92d573c734b5..365b18364815 100644
--- a/arch/arc/lib/memset-archs.S
+++ b/arch/arc/lib/memset-archs.S
@@ -10,12 +10,6 @@
10 10
11#undef PREALLOC_NOT_AVAIL 11#undef PREALLOC_NOT_AVAIL
12 12
13#ifdef PREALLOC_NOT_AVAIL
14#define PREWRITE(A,B) prefetchw [(A),(B)]
15#else
16#define PREWRITE(A,B) prealloc [(A),(B)]
17#endif
18
19ENTRY(memset) 13ENTRY(memset)
20 prefetchw [r0] ; Prefetch the write location 14 prefetchw [r0] ; Prefetch the write location
21 mov.f 0, r2 15 mov.f 0, r2
@@ -51,9 +45,15 @@ ENTRY(memset)
51 45
52;;; Convert len to Dwords, unfold x8 46;;; Convert len to Dwords, unfold x8
53 lsr.f lp_count, lp_count, 6 47 lsr.f lp_count, lp_count, 6
48
54 lpnz @.Lset64bytes 49 lpnz @.Lset64bytes
55 ;; LOOP START 50 ;; LOOP START
56 PREWRITE(r3, 64) ;Prefetch the next write location 51#ifdef PREALLOC_NOT_AVAIL
52 prefetchw [r3, 64] ;Prefetch the next write location
53#else
54 prealloc [r3, 64]
55#endif
56#ifdef CONFIG_ARC_HAS_LL64
57 std.ab r4, [r3, 8] 57 std.ab r4, [r3, 8]
58 std.ab r4, [r3, 8] 58 std.ab r4, [r3, 8]
59 std.ab r4, [r3, 8] 59 std.ab r4, [r3, 8]
@@ -62,16 +62,45 @@ ENTRY(memset)
62 std.ab r4, [r3, 8] 62 std.ab r4, [r3, 8]
63 std.ab r4, [r3, 8] 63 std.ab r4, [r3, 8]
64 std.ab r4, [r3, 8] 64 std.ab r4, [r3, 8]
65#else
66 st.ab r4, [r3, 4]
67 st.ab r4, [r3, 4]
68 st.ab r4, [r3, 4]
69 st.ab r4, [r3, 4]
70 st.ab r4, [r3, 4]
71 st.ab r4, [r3, 4]
72 st.ab r4, [r3, 4]
73 st.ab r4, [r3, 4]
74 st.ab r4, [r3, 4]
75 st.ab r4, [r3, 4]
76 st.ab r4, [r3, 4]
77 st.ab r4, [r3, 4]
78 st.ab r4, [r3, 4]
79 st.ab r4, [r3, 4]
80 st.ab r4, [r3, 4]
81 st.ab r4, [r3, 4]
82#endif
65.Lset64bytes: 83.Lset64bytes:
66 84
67 lsr.f lp_count, r2, 5 ;Last remaining max 124 bytes 85 lsr.f lp_count, r2, 5 ;Last remaining max 124 bytes
68 lpnz .Lset32bytes 86 lpnz .Lset32bytes
69 ;; LOOP START 87 ;; LOOP START
70 prefetchw [r3, 32] ;Prefetch the next write location 88 prefetchw [r3, 32] ;Prefetch the next write location
89#ifdef CONFIG_ARC_HAS_LL64
71 std.ab r4, [r3, 8] 90 std.ab r4, [r3, 8]
72 std.ab r4, [r3, 8] 91 std.ab r4, [r3, 8]
73 std.ab r4, [r3, 8] 92 std.ab r4, [r3, 8]
74 std.ab r4, [r3, 8] 93 std.ab r4, [r3, 8]
94#else
95 st.ab r4, [r3, 4]
96 st.ab r4, [r3, 4]
97 st.ab r4, [r3, 4]
98 st.ab r4, [r3, 4]
99 st.ab r4, [r3, 4]
100 st.ab r4, [r3, 4]
101 st.ab r4, [r3, 4]
102 st.ab r4, [r3, 4]
103#endif
75.Lset32bytes: 104.Lset32bytes:
76 105
77 and.f lp_count, r2, 0x1F ;Last remaining 31 bytes 106 and.f lp_count, r2, 0x1F ;Last remaining 31 bytes
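memset now open-codes the prefetchw/prealloc choice and, more importantly, only emits std.ab (64-bit stores) when CONFIG_ARC_HAS_LL64 is set, falling back to plain word stores otherwise. Roughly the same decision in C, with HAVE_LL64 standing in for the Kconfig symbol and lengths assumed to be suitably aligned multiples for brevity:

	#include <stdint.h>

	static void fill_block(void *buf, uint8_t c, unsigned long len)
	{
	#ifdef HAVE_LL64			/* stand-in for CONFIG_ARC_HAS_LL64 */
		uint64_t v = 0x0101010101010101ULL * c;
		uint64_t *p = buf;

		for (; len >= 8; len -= 8)
			*p++ = v;		/* double-word stores, like std.ab */
	#else
		uint32_t v = 0x01010101UL * c;
		uint32_t *p = buf;

		for (; len >= 4; len -= 4)
			*p++ = v;		/* word stores, like st.ab */
	#endif
	}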
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index b29d62ed4f7e..1cd6695b6ab5 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -468,10 +468,18 @@ static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
468noinline void slc_op(unsigned long paddr, unsigned long sz, const int op) 468noinline void slc_op(unsigned long paddr, unsigned long sz, const int op)
469{ 469{
470#ifdef CONFIG_ISA_ARCV2 470#ifdef CONFIG_ISA_ARCV2
471 /*
 472	 * SLC is shared between all cores, so concurrent aux operations from
 473	 * multiple cores need to be serialized using a spinlock.
 474	 * Otherwise a concurrent operation can be silently ignored and/or the
 475	 * old/new operation can remain incomplete forever (lockup in the
 476	 * SLC_CTRL_BUSY loop below).
477 */
478 static DEFINE_SPINLOCK(lock);
471 unsigned long flags; 479 unsigned long flags;
472 unsigned int ctrl; 480 unsigned int ctrl;
473 481
474 local_irq_save(flags); 482 spin_lock_irqsave(&lock, flags);
475 483
476 /* 484 /*
477 * The Region Flush operation is specified by CTRL.RGN_OP[11..9] 485 * The Region Flush operation is specified by CTRL.RGN_OP[11..9]
@@ -504,7 +512,7 @@ noinline void slc_op(unsigned long paddr, unsigned long sz, const int op)
504 512
505 while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY); 513 while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);
506 514
507 local_irq_restore(flags); 515 spin_unlock_irqrestore(&lock, flags);
508#endif 516#endif
509} 517}
510 518
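slc_op() previously relied on local_irq_save(), which only excludes the local CPU; since the SLC is shared by all cores, a cross-CPU spinlock is needed, as the new comment explains. The generic shape of that pattern, with hypothetical register accessors (read_ctrl_reg/write_ctrl_reg and CTRL_BUSY are not real kernel symbols):

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(sketch_lock);

	static void sketch_reg_op(u32 set_bits)
	{
		unsigned long flags;
		u32 ctrl;

		spin_lock_irqsave(&sketch_lock, flags);	/* excludes other cores, not just local IRQs */

		ctrl = read_ctrl_reg();			/* hypothetical accessor */
		write_ctrl_reg(ctrl | set_bits);	/* hypothetical accessor */

		while (read_ctrl_reg() & CTRL_BUSY)	/* poll completion under the lock */
			;

		spin_unlock_irqrestore(&sketch_lock, flags);
	}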
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index 74a637a1cfc4..57706a9c6948 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -60,8 +60,8 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
60 60
61 /* This is kernel Virtual address (0x7000_0000 based) */ 61 /* This is kernel Virtual address (0x7000_0000 based) */
62 kvaddr = ioremap_nocache((unsigned long)paddr, size); 62 kvaddr = ioremap_nocache((unsigned long)paddr, size);
63 if (kvaddr != NULL) 63 if (kvaddr == NULL)
64 memset(kvaddr, 0, size); 64 return NULL;
65 65
66 /* This is bus address, platform dependent */ 66 /* This is bus address, platform dependent */
67 *dma_handle = (dma_addr_t)paddr; 67 *dma_handle = (dma_addr_t)paddr;
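The dma_alloc_coherent() change turns "memset only if the remap worked, then carry on" into a proper early return, so a failed ioremap no longer hands the caller a NULL mapping with a seemingly valid bus address. The generic shape of the fix, with hypothetical helpers (cleanup and zeroing beyond what this hunk shows are deliberately left out):

	void *sketch_alloc_coherent(unsigned long size, unsigned long *dma_handle)
	{
		void *page = sketch_alloc_pages(size);		/* hypothetical backing allocator */
		void *kvaddr;

		if (!page)
			return NULL;

		kvaddr = sketch_remap_uncached(page, size);	/* hypothetical ioremap-style helper */
		if (!kvaddr)
			return NULL;	/* old code only skipped the memset here and fell through */

		*dma_handle = (unsigned long)page;
		return kvaddr;
	}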
diff --git a/arch/arc/plat-axs10x/axs10x.c b/arch/arc/plat-axs10x/axs10x.c
index 99f7da513a48..e7769c3ab5f2 100644
--- a/arch/arc/plat-axs10x/axs10x.c
+++ b/arch/arc/plat-axs10x/axs10x.c
@@ -389,6 +389,21 @@ axs103_set_freq(unsigned int id, unsigned int fd, unsigned int od)
389 389
390static void __init axs103_early_init(void) 390static void __init axs103_early_init(void)
391{ 391{
392 /*
 393	 * AXS103 SMP/QUAD configurations share a device tree which defaults
 394	 * to 90 MHz. However, recent failures of the Quad config revealed P&R
 395	 * timing violations, so clamp it down to a safe 50 MHz.
 396	 * Instead of duplicating the defconfig/DT for SMP/QUAD, add a small hack.
 397	 *
 398	 * This is a stopgap; fix it properly by getting the number of cores as
 399	 * the return value of the platform's early SMP callback.
400 */
401#ifdef CONFIG_ARC_MCIP
402 unsigned int num_cores = (read_aux_reg(ARC_REG_MCIP_BCR) >> 16) & 0x3F;
403 if (num_cores > 2)
404 arc_set_core_freq(50 * 1000000);
405#endif
406
392 switch (arc_get_core_freq()/1000000) { 407 switch (arc_get_core_freq()/1000000) {
393 case 33: 408 case 33:
394 axs103_set_freq(1, 1, 1); 409 axs103_set_freq(1, 1, 1);
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 07ab3d203916..7451b447cc2d 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -312,6 +312,9 @@ INSTALL_TARGETS = zinstall uinstall install
312 312
313PHONY += bzImage $(BOOT_TARGETS) $(INSTALL_TARGETS) 313PHONY += bzImage $(BOOT_TARGETS) $(INSTALL_TARGETS)
314 314
315bootpImage uImage: zImage
316zImage: Image
317
315$(BOOT_TARGETS): vmlinux 318$(BOOT_TARGETS): vmlinux
316 $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@ 319 $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
317 320
diff --git a/arch/arm/boot/dts/am335x-pepper.dts b/arch/arm/boot/dts/am335x-pepper.dts
index 0d35ab64641c..7106114c7464 100644
--- a/arch/arm/boot/dts/am335x-pepper.dts
+++ b/arch/arm/boot/dts/am335x-pepper.dts
@@ -74,6 +74,7 @@
74 audio_codec: tlv320aic3106@1b { 74 audio_codec: tlv320aic3106@1b {
75 compatible = "ti,tlv320aic3106"; 75 compatible = "ti,tlv320aic3106";
76 reg = <0x1b>; 76 reg = <0x1b>;
77 ai3x-micbias-vg = <0x2>;
77 }; 78 };
78 79
79 accel: lis331dlh@1d { 80 accel: lis331dlh@1d {
@@ -153,7 +154,7 @@
153 ti,audio-routing = 154 ti,audio-routing =
154 "Headphone Jack", "HPLOUT", 155 "Headphone Jack", "HPLOUT",
155 "Headphone Jack", "HPROUT", 156 "Headphone Jack", "HPROUT",
156 "LINE1L", "Line In"; 157 "MIC3L", "Mic3L Switch";
157}; 158};
158 159
159&mcasp0 { 160&mcasp0 {
@@ -438,41 +439,50 @@
438 regulators { 439 regulators {
439 dcdc1_reg: regulator@0 { 440 dcdc1_reg: regulator@0 {
440 /* VDD_1V8 system supply */ 441 /* VDD_1V8 system supply */
442 regulator-always-on;
441 }; 443 };
442 444
443 dcdc2_reg: regulator@1 { 445 dcdc2_reg: regulator@1 {
444 /* VDD_CORE voltage limits 0.95V - 1.26V with +/-4% tolerance */ 446 /* VDD_CORE voltage limits 0.95V - 1.26V with +/-4% tolerance */
445 regulator-name = "vdd_core"; 447 regulator-name = "vdd_core";
446 regulator-min-microvolt = <925000>; 448 regulator-min-microvolt = <925000>;
447 regulator-max-microvolt = <1325000>; 449 regulator-max-microvolt = <1150000>;
448 regulator-boot-on; 450 regulator-boot-on;
451 regulator-always-on;
449 }; 452 };
450 453
451 dcdc3_reg: regulator@2 { 454 dcdc3_reg: regulator@2 {
452 /* VDD_MPU voltage limits 0.95V - 1.1V with +/-4% tolerance */ 455 /* VDD_MPU voltage limits 0.95V - 1.1V with +/-4% tolerance */
453 regulator-name = "vdd_mpu"; 456 regulator-name = "vdd_mpu";
454 regulator-min-microvolt = <925000>; 457 regulator-min-microvolt = <925000>;
455 regulator-max-microvolt = <1150000>; 458 regulator-max-microvolt = <1325000>;
456 regulator-boot-on; 459 regulator-boot-on;
460 regulator-always-on;
457 }; 461 };
458 462
459 ldo1_reg: regulator@3 { 463 ldo1_reg: regulator@3 {
460 /* VRTC 1.8V always-on supply */ 464 /* VRTC 1.8V always-on supply */
465 regulator-name = "vrtc,vdds";
461 regulator-always-on; 466 regulator-always-on;
462 }; 467 };
463 468
464 ldo2_reg: regulator@4 { 469 ldo2_reg: regulator@4 {
465 /* 3.3V rail */ 470 /* 3.3V rail */
471 regulator-name = "vdd_3v3aux";
472 regulator-always-on;
466 }; 473 };
467 474
468 ldo3_reg: regulator@5 { 475 ldo3_reg: regulator@5 {
469 /* VDD_3V3A 3.3V rail */ 476 /* VDD_3V3A 3.3V rail */
477 regulator-name = "vdd_3v3a";
470 regulator-min-microvolt = <3300000>; 478 regulator-min-microvolt = <3300000>;
471 regulator-max-microvolt = <3300000>; 479 regulator-max-microvolt = <3300000>;
472 }; 480 };
473 481
474 ldo4_reg: regulator@6 { 482 ldo4_reg: regulator@6 {
475 /* VDD_3V3B 3.3V rail */ 483 /* VDD_3V3B 3.3V rail */
484 regulator-name = "vdd_3v3b";
485 regulator-always-on;
476 }; 486 };
477 }; 487 };
478}; 488};
diff --git a/arch/arm/boot/dts/cros-ec-keyboard.dtsi b/arch/arm/boot/dts/cros-ec-keyboard.dtsi
index 9c7fb0acae79..4e42f30cb318 100644
--- a/arch/arm/boot/dts/cros-ec-keyboard.dtsi
+++ b/arch/arm/boot/dts/cros-ec-keyboard.dtsi
@@ -22,6 +22,7 @@
22 MATRIX_KEY(0x00, 0x02, KEY_F1) 22 MATRIX_KEY(0x00, 0x02, KEY_F1)
23 MATRIX_KEY(0x00, 0x03, KEY_B) 23 MATRIX_KEY(0x00, 0x03, KEY_B)
24 MATRIX_KEY(0x00, 0x04, KEY_F10) 24 MATRIX_KEY(0x00, 0x04, KEY_F10)
25 MATRIX_KEY(0x00, 0x05, KEY_RO)
25 MATRIX_KEY(0x00, 0x06, KEY_N) 26 MATRIX_KEY(0x00, 0x06, KEY_N)
26 MATRIX_KEY(0x00, 0x08, KEY_EQUAL) 27 MATRIX_KEY(0x00, 0x08, KEY_EQUAL)
27 MATRIX_KEY(0x00, 0x0a, KEY_RIGHTALT) 28 MATRIX_KEY(0x00, 0x0a, KEY_RIGHTALT)
@@ -34,6 +35,7 @@
34 MATRIX_KEY(0x01, 0x08, KEY_APOSTROPHE) 35 MATRIX_KEY(0x01, 0x08, KEY_APOSTROPHE)
35 MATRIX_KEY(0x01, 0x09, KEY_F9) 36 MATRIX_KEY(0x01, 0x09, KEY_F9)
36 MATRIX_KEY(0x01, 0x0b, KEY_BACKSPACE) 37 MATRIX_KEY(0x01, 0x0b, KEY_BACKSPACE)
38 MATRIX_KEY(0x01, 0x0c, KEY_HENKAN)
37 39
38 MATRIX_KEY(0x02, 0x00, KEY_LEFTCTRL) 40 MATRIX_KEY(0x02, 0x00, KEY_LEFTCTRL)
39 MATRIX_KEY(0x02, 0x01, KEY_TAB) 41 MATRIX_KEY(0x02, 0x01, KEY_TAB)
@@ -45,6 +47,7 @@
45 MATRIX_KEY(0x02, 0x07, KEY_102ND) 47 MATRIX_KEY(0x02, 0x07, KEY_102ND)
46 MATRIX_KEY(0x02, 0x08, KEY_LEFTBRACE) 48 MATRIX_KEY(0x02, 0x08, KEY_LEFTBRACE)
47 MATRIX_KEY(0x02, 0x09, KEY_F8) 49 MATRIX_KEY(0x02, 0x09, KEY_F8)
50 MATRIX_KEY(0x02, 0x0a, KEY_YEN)
48 51
49 MATRIX_KEY(0x03, 0x01, KEY_GRAVE) 52 MATRIX_KEY(0x03, 0x01, KEY_GRAVE)
50 MATRIX_KEY(0x03, 0x02, KEY_F2) 53 MATRIX_KEY(0x03, 0x02, KEY_F2)
@@ -53,6 +56,7 @@
53 MATRIX_KEY(0x03, 0x06, KEY_6) 56 MATRIX_KEY(0x03, 0x06, KEY_6)
54 MATRIX_KEY(0x03, 0x08, KEY_MINUS) 57 MATRIX_KEY(0x03, 0x08, KEY_MINUS)
55 MATRIX_KEY(0x03, 0x0b, KEY_BACKSLASH) 58 MATRIX_KEY(0x03, 0x0b, KEY_BACKSLASH)
59 MATRIX_KEY(0x03, 0x0c, KEY_MUHENKAN)
56 60
57 MATRIX_KEY(0x04, 0x00, KEY_RIGHTCTRL) 61 MATRIX_KEY(0x04, 0x00, KEY_RIGHTCTRL)
58 MATRIX_KEY(0x04, 0x01, KEY_A) 62 MATRIX_KEY(0x04, 0x01, KEY_A)
diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts
index aa465904f6cc..096f68be99e2 100644
--- a/arch/arm/boot/dts/dra7-evm.dts
+++ b/arch/arm/boot/dts/dra7-evm.dts
@@ -686,7 +686,8 @@
686 686
687&dcan1 { 687&dcan1 {
688 status = "ok"; 688 status = "ok";
689 pinctrl-names = "default", "sleep"; 689 pinctrl-names = "default", "sleep", "active";
690 pinctrl-0 = <&dcan1_pins_default>; 690 pinctrl-0 = <&dcan1_pins_sleep>;
691 pinctrl-1 = <&dcan1_pins_sleep>; 691 pinctrl-1 = <&dcan1_pins_sleep>;
692 pinctrl-2 = <&dcan1_pins_default>;
692}; 693};
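dcan1 now boots with the sleep pin state selected as "default" and gains an explicit "active" state, so the driver is expected to switch pin states at runtime instead of trusting "default" alone. A sketch of that switch using the generic pinctrl consumer API (the device pointer is whichever device the CAN driver owns; this is not the driver's actual code):

	#include <linux/err.h>
	#include <linux/pinctrl/consumer.h>

	static int sketch_dcan_activate(struct device *dev)
	{
		struct pinctrl *p = devm_pinctrl_get(dev);
		struct pinctrl_state *active;

		if (IS_ERR(p))
			return PTR_ERR(p);

		active = pinctrl_lookup_state(p, "active");
		if (IS_ERR(active))
			return PTR_ERR(active);

		return pinctrl_select_state(p, active);	/* leave the sleep pins behind */
	}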
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index 8f1e25bcecbd..1e29ccf77ea2 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -116,7 +116,7 @@
116 ranges = <0 0x2000 0x2000>; 116 ranges = <0 0x2000 0x2000>;
117 117
118 scm_conf: scm_conf@0 { 118 scm_conf: scm_conf@0 {
119 compatible = "syscon"; 119 compatible = "syscon", "simple-bus";
120 reg = <0x0 0x1400>; 120 reg = <0x0 0x1400>;
121 #address-cells = <1>; 121 #address-cells = <1>;
122 #size-cells = <1>; 122 #size-cells = <1>;
@@ -1140,6 +1140,7 @@
1140 ctrl-module = <&omap_control_sata>; 1140 ctrl-module = <&omap_control_sata>;
1141 clocks = <&sys_clkin1>, <&sata_ref_clk>; 1141 clocks = <&sys_clkin1>, <&sata_ref_clk>;
1142 clock-names = "sysclk", "refclk"; 1142 clock-names = "sysclk", "refclk";
1143 syscon-pllreset = <&scm_conf 0x3fc>;
1143 #phy-cells = <0>; 1144 #phy-cells = <0>;
1144 }; 1145 };
1145 1146
diff --git a/arch/arm/boot/dts/dra72-evm.dts b/arch/arm/boot/dts/dra72-evm.dts
index 4e1b60581782..803738414086 100644
--- a/arch/arm/boot/dts/dra72-evm.dts
+++ b/arch/arm/boot/dts/dra72-evm.dts
@@ -587,9 +587,10 @@
587 587
588&dcan1 { 588&dcan1 {
589 status = "ok"; 589 status = "ok";
590 pinctrl-names = "default", "sleep"; 590 pinctrl-names = "default", "sleep", "active";
591 pinctrl-0 = <&dcan1_pins_default>; 591 pinctrl-0 = <&dcan1_pins_sleep>;
592 pinctrl-1 = <&dcan1_pins_sleep>; 592 pinctrl-1 = <&dcan1_pins_sleep>;
593 pinctrl-2 = <&dcan1_pins_default>;
593}; 594};
594 595
595&qspi { 596&qspi {
diff --git a/arch/arm/boot/dts/exynos3250.dtsi b/arch/arm/boot/dts/exynos3250.dtsi
index d7201333e3bc..2db99433e17f 100644
--- a/arch/arm/boot/dts/exynos3250.dtsi
+++ b/arch/arm/boot/dts/exynos3250.dtsi
@@ -138,8 +138,8 @@
138 138
139 mipi_phy: video-phy@10020710 { 139 mipi_phy: video-phy@10020710 {
140 compatible = "samsung,s5pv210-mipi-video-phy"; 140 compatible = "samsung,s5pv210-mipi-video-phy";
141 reg = <0x10020710 8>;
142 #phy-cells = <1>; 141 #phy-cells = <1>;
142 syscon = <&pmu_system_controller>;
143 }; 143 };
144 144
145 pd_cam: cam-power-domain@10023C00 { 145 pd_cam: cam-power-domain@10023C00 {
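The MIPI video phy node drops its own reg window and instead points at the PMU through a syscon phandle, so the phy driver is expected to go through a shared regmap rather than mapping the registers itself. A minimal sketch of that lookup (the property name matches the DT change; everything else is illustrative):

	#include <linux/mfd/syscon.h>
	#include <linux/regmap.h>

	static struct regmap *sketch_get_pmu(struct device_node *np)
	{
		/* resolves the 'syscon = <&pmu_system_controller>' phandle */
		return syscon_regmap_lookup_by_phandle(np, "syscon");
	}

	/* later: regmap_update_bits(pmu, offset, mask, val); */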
diff --git a/arch/arm/boot/dts/exynos4210-origen.dts b/arch/arm/boot/dts/exynos4210-origen.dts
index e0abfc3324d1..e050d85cdacd 100644
--- a/arch/arm/boot/dts/exynos4210-origen.dts
+++ b/arch/arm/boot/dts/exynos4210-origen.dts
@@ -127,6 +127,10 @@
127 }; 127 };
128}; 128};
129 129
130&cpu0 {
131 cpu0-supply = <&buck1_reg>;
132};
133
130&fimd { 134&fimd {
131 pinctrl-0 = <&lcd_en &lcd_clk &lcd_data24 &pwm0_out>; 135 pinctrl-0 = <&lcd_en &lcd_clk &lcd_data24 &pwm0_out>;
132 pinctrl-names = "default"; 136 pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/exynos4210-trats.dts b/arch/arm/boot/dts/exynos4210-trats.dts
index 98f3ce65cb9a..ba34886f8b65 100644
--- a/arch/arm/boot/dts/exynos4210-trats.dts
+++ b/arch/arm/boot/dts/exynos4210-trats.dts
@@ -188,6 +188,10 @@
188 }; 188 };
189}; 189};
190 190
191&cpu0 {
192 cpu0-supply = <&varm_breg>;
193};
194
191&dsi_0 { 195&dsi_0 {
192 vddcore-supply = <&vusb_reg>; 196 vddcore-supply = <&vusb_reg>;
193 vddio-supply = <&vmipi_reg>; 197 vddio-supply = <&vmipi_reg>;
diff --git a/arch/arm/boot/dts/exynos4210-universal_c210.dts b/arch/arm/boot/dts/exynos4210-universal_c210.dts
index d4f2b11319dd..775892b2cc6a 100644
--- a/arch/arm/boot/dts/exynos4210-universal_c210.dts
+++ b/arch/arm/boot/dts/exynos4210-universal_c210.dts
@@ -548,6 +548,10 @@
548 }; 548 };
549}; 549};
550 550
551&cpu0 {
552 cpu0-supply = <&vdd_arm_reg>;
553};
554
551&pinctrl_1 { 555&pinctrl_1 {
552 hdmi_hpd: hdmi-hpd { 556 hdmi_hpd: hdmi-hpd {
553 samsung,pins = "gpx3-7"; 557 samsung,pins = "gpx3-7";
diff --git a/arch/arm/boot/dts/exynos4210.dtsi b/arch/arm/boot/dts/exynos4210.dtsi
index 10d3c173396e..3e5ba665d200 100644
--- a/arch/arm/boot/dts/exynos4210.dtsi
+++ b/arch/arm/boot/dts/exynos4210.dtsi
@@ -40,6 +40,18 @@
40 device_type = "cpu"; 40 device_type = "cpu";
41 compatible = "arm,cortex-a9"; 41 compatible = "arm,cortex-a9";
42 reg = <0x900>; 42 reg = <0x900>;
43 clocks = <&clock CLK_ARM_CLK>;
44 clock-names = "cpu";
45 clock-latency = <160000>;
46
47 operating-points = <
48 1200000 1250000
49 1000000 1150000
50 800000 1075000
51 500000 975000
52 400000 975000
53 200000 950000
54 >;
43 cooling-min-level = <4>; 55 cooling-min-level = <4>;
44 cooling-max-level = <2>; 56 cooling-max-level = <2>;
45 #cooling-cells = <2>; /* min followed by max */ 57 #cooling-cells = <2>; /* min followed by max */
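The cpu0 node gains a clock, an operating-points table (pairs of <kHz uV>) and, in the board files, a cpu0-supply, which is the information a DT-based cpufreq driver needs to scale both frequency and voltage. For reference, the same table expressed through the OPP API, which takes Hz and uV (the device pointer would be cpu0's struct device; this is only an illustration of the unit conversion):

	#include <linux/pm_opp.h>

	static int sketch_register_opps(struct device *cpu_dev)
	{
		int ret;

		ret  = dev_pm_opp_add(cpu_dev, 1200000000, 1250000);
		ret |= dev_pm_opp_add(cpu_dev, 1000000000, 1150000);
		ret |= dev_pm_opp_add(cpu_dev,  800000000, 1075000);
		ret |= dev_pm_opp_add(cpu_dev,  500000000,  975000);
		ret |= dev_pm_opp_add(cpu_dev,  400000000,  975000);
		ret |= dev_pm_opp_add(cpu_dev,  200000000,  950000);

		return ret;
	}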
diff --git a/arch/arm/boot/dts/imx23.dtsi b/arch/arm/boot/dts/imx23.dtsi
index c892d58e8dad..b995333ea22b 100644
--- a/arch/arm/boot/dts/imx23.dtsi
+++ b/arch/arm/boot/dts/imx23.dtsi
@@ -468,6 +468,7 @@
468 interrupts = <36 37 38 39 40 41 42 43 44>; 468 interrupts = <36 37 38 39 40 41 42 43 44>;
469 status = "disabled"; 469 status = "disabled";
470 clocks = <&clks 26>; 470 clocks = <&clks 26>;
471 #io-channel-cells = <1>;
471 }; 472 };
472 473
473 spdif@80054000 { 474 spdif@80054000 {
diff --git a/arch/arm/boot/dts/imx25-pdk.dts b/arch/arm/boot/dts/imx25-pdk.dts
index dd45e6971bc3..9351296356dc 100644
--- a/arch/arm/boot/dts/imx25-pdk.dts
+++ b/arch/arm/boot/dts/imx25-pdk.dts
@@ -10,6 +10,7 @@
10 */ 10 */
11 11
12/dts-v1/; 12/dts-v1/;
13#include <dt-bindings/gpio/gpio.h>
13#include <dt-bindings/input/input.h> 14#include <dt-bindings/input/input.h>
14#include "imx25.dtsi" 15#include "imx25.dtsi"
15 16
@@ -114,8 +115,8 @@
114&esdhc1 { 115&esdhc1 {
115 pinctrl-names = "default"; 116 pinctrl-names = "default";
116 pinctrl-0 = <&pinctrl_esdhc1>; 117 pinctrl-0 = <&pinctrl_esdhc1>;
117 cd-gpios = <&gpio2 1 0>; 118 cd-gpios = <&gpio2 1 GPIO_ACTIVE_LOW>;
118 wp-gpios = <&gpio2 0 0>; 119 wp-gpios = <&gpio2 0 GPIO_ACTIVE_HIGH>;
119 status = "okay"; 120 status = "okay";
120}; 121};
121 122
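This and the following board files replace raw 0 flag cells in cd-gpios/wp-gpios with GPIO_ACTIVE_LOW/GPIO_ACTIVE_HIGH, which only matters because the host driver reads the polarity from that flags cell. Roughly how a driver picks it up (a generic sketch, not the esdhc driver itself):

	#include <linux/of_gpio.h>

	static int sketch_cd_is_active_low(struct device_node *np)
	{
		enum of_gpio_flags flags;
		int gpio = of_get_named_gpio_flags(np, "cd-gpios", 0, &flags);

		if (gpio < 0)
			return gpio;

		/* GPIO_ACTIVE_LOW in the DT arrives here as OF_GPIO_ACTIVE_LOW */
		return (flags & OF_GPIO_ACTIVE_LOW) ? 1 : 0;
	}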
diff --git a/arch/arm/boot/dts/imx27.dtsi b/arch/arm/boot/dts/imx27.dtsi
index bc215e4b75fd..b69be5c499cf 100644
--- a/arch/arm/boot/dts/imx27.dtsi
+++ b/arch/arm/boot/dts/imx27.dtsi
@@ -108,7 +108,7 @@
108 }; 108 };
109 109
110 gpt1: timer@10003000 { 110 gpt1: timer@10003000 {
111 compatible = "fsl,imx27-gpt", "fsl,imx1-gpt"; 111 compatible = "fsl,imx27-gpt", "fsl,imx21-gpt";
112 reg = <0x10003000 0x1000>; 112 reg = <0x10003000 0x1000>;
113 interrupts = <26>; 113 interrupts = <26>;
114 clocks = <&clks IMX27_CLK_GPT1_IPG_GATE>, 114 clocks = <&clks IMX27_CLK_GPT1_IPG_GATE>,
@@ -117,7 +117,7 @@
117 }; 117 };
118 118
119 gpt2: timer@10004000 { 119 gpt2: timer@10004000 {
120 compatible = "fsl,imx27-gpt", "fsl,imx1-gpt"; 120 compatible = "fsl,imx27-gpt", "fsl,imx21-gpt";
121 reg = <0x10004000 0x1000>; 121 reg = <0x10004000 0x1000>;
122 interrupts = <25>; 122 interrupts = <25>;
123 clocks = <&clks IMX27_CLK_GPT2_IPG_GATE>, 123 clocks = <&clks IMX27_CLK_GPT2_IPG_GATE>,
@@ -126,7 +126,7 @@
126 }; 126 };
127 127
128 gpt3: timer@10005000 { 128 gpt3: timer@10005000 {
129 compatible = "fsl,imx27-gpt", "fsl,imx1-gpt"; 129 compatible = "fsl,imx27-gpt", "fsl,imx21-gpt";
130 reg = <0x10005000 0x1000>; 130 reg = <0x10005000 0x1000>;
131 interrupts = <24>; 131 interrupts = <24>;
132 clocks = <&clks IMX27_CLK_GPT3_IPG_GATE>, 132 clocks = <&clks IMX27_CLK_GPT3_IPG_GATE>,
@@ -376,7 +376,7 @@
376 }; 376 };
377 377
378 gpt4: timer@10019000 { 378 gpt4: timer@10019000 {
379 compatible = "fsl,imx27-gpt", "fsl,imx1-gpt"; 379 compatible = "fsl,imx27-gpt", "fsl,imx21-gpt";
380 reg = <0x10019000 0x1000>; 380 reg = <0x10019000 0x1000>;
381 interrupts = <4>; 381 interrupts = <4>;
382 clocks = <&clks IMX27_CLK_GPT4_IPG_GATE>, 382 clocks = <&clks IMX27_CLK_GPT4_IPG_GATE>,
@@ -385,7 +385,7 @@
385 }; 385 };
386 386
387 gpt5: timer@1001a000 { 387 gpt5: timer@1001a000 {
388 compatible = "fsl,imx27-gpt", "fsl,imx1-gpt"; 388 compatible = "fsl,imx27-gpt", "fsl,imx21-gpt";
389 reg = <0x1001a000 0x1000>; 389 reg = <0x1001a000 0x1000>;
390 interrupts = <3>; 390 interrupts = <3>;
391 clocks = <&clks IMX27_CLK_GPT5_IPG_GATE>, 391 clocks = <&clks IMX27_CLK_GPT5_IPG_GATE>,
@@ -436,7 +436,7 @@
436 }; 436 };
437 437
438 gpt6: timer@1001f000 { 438 gpt6: timer@1001f000 {
439 compatible = "fsl,imx27-gpt", "fsl,imx1-gpt"; 439 compatible = "fsl,imx27-gpt", "fsl,imx21-gpt";
440 reg = <0x1001f000 0x1000>; 440 reg = <0x1001f000 0x1000>;
441 interrupts = <2>; 441 interrupts = <2>;
442 clocks = <&clks IMX27_CLK_GPT6_IPG_GATE>, 442 clocks = <&clks IMX27_CLK_GPT6_IPG_GATE>,
diff --git a/arch/arm/boot/dts/imx35.dtsi b/arch/arm/boot/dts/imx35.dtsi
index b6478e97d6a7..e6540b5cfa4c 100644
--- a/arch/arm/boot/dts/imx35.dtsi
+++ b/arch/arm/boot/dts/imx35.dtsi
@@ -286,8 +286,8 @@
286 can1: can@53fe4000 { 286 can1: can@53fe4000 {
287 compatible = "fsl,imx35-flexcan", "fsl,p1010-flexcan"; 287 compatible = "fsl,imx35-flexcan", "fsl,p1010-flexcan";
288 reg = <0x53fe4000 0x1000>; 288 reg = <0x53fe4000 0x1000>;
289 clocks = <&clks 33>; 289 clocks = <&clks 33>, <&clks 33>;
290 clock-names = "ipg"; 290 clock-names = "ipg", "per";
291 interrupts = <43>; 291 interrupts = <43>;
292 status = "disabled"; 292 status = "disabled";
293 }; 293 };
@@ -295,8 +295,8 @@
295 can2: can@53fe8000 { 295 can2: can@53fe8000 {
296 compatible = "fsl,imx35-flexcan", "fsl,p1010-flexcan"; 296 compatible = "fsl,imx35-flexcan", "fsl,p1010-flexcan";
297 reg = <0x53fe8000 0x1000>; 297 reg = <0x53fe8000 0x1000>;
298 clocks = <&clks 34>; 298 clocks = <&clks 34>, <&clks 34>;
299 clock-names = "ipg"; 299 clock-names = "ipg", "per";
300 interrupts = <44>; 300 interrupts = <44>;
301 status = "disabled"; 301 status = "disabled";
302 }; 302 };
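The flexcan nodes now provide both clocks the binding names, "ipg" and "per" (here the same clock listed twice). On the consumer side those names are exactly what the driver asks devm_clk_get() for; a trimmed, generic sketch:

	#include <linux/clk.h>
	#include <linux/err.h>

	static int sketch_get_can_clocks(struct device *dev)
	{
		struct clk *ipg = devm_clk_get(dev, "ipg");
		struct clk *per = devm_clk_get(dev, "per");

		if (IS_ERR(ipg) || IS_ERR(per))
			return -ENODEV;

		clk_prepare_enable(ipg);
		clk_prepare_enable(per);
		return 0;
	}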
diff --git a/arch/arm/boot/dts/imx51-apf51dev.dts b/arch/arm/boot/dts/imx51-apf51dev.dts
index 93d3ea12328c..0f3fe29b816e 100644
--- a/arch/arm/boot/dts/imx51-apf51dev.dts
+++ b/arch/arm/boot/dts/imx51-apf51dev.dts
@@ -98,7 +98,7 @@
98&esdhc1 { 98&esdhc1 {
99 pinctrl-names = "default"; 99 pinctrl-names = "default";
100 pinctrl-0 = <&pinctrl_esdhc1>; 100 pinctrl-0 = <&pinctrl_esdhc1>;
101 cd-gpios = <&gpio2 29 GPIO_ACTIVE_HIGH>; 101 cd-gpios = <&gpio2 29 GPIO_ACTIVE_LOW>;
102 bus-width = <4>; 102 bus-width = <4>;
103 status = "okay"; 103 status = "okay";
104}; 104};
diff --git a/arch/arm/boot/dts/imx53-ard.dts b/arch/arm/boot/dts/imx53-ard.dts
index e9337ad52f59..3bc18835fb4b 100644
--- a/arch/arm/boot/dts/imx53-ard.dts
+++ b/arch/arm/boot/dts/imx53-ard.dts
@@ -103,8 +103,8 @@
103&esdhc1 { 103&esdhc1 {
104 pinctrl-names = "default"; 104 pinctrl-names = "default";
105 pinctrl-0 = <&pinctrl_esdhc1>; 105 pinctrl-0 = <&pinctrl_esdhc1>;
106 cd-gpios = <&gpio1 1 0>; 106 cd-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
107 wp-gpios = <&gpio1 9 0>; 107 wp-gpios = <&gpio1 9 GPIO_ACTIVE_HIGH>;
108 status = "okay"; 108 status = "okay";
109}; 109};
110 110
diff --git a/arch/arm/boot/dts/imx53-m53evk.dts b/arch/arm/boot/dts/imx53-m53evk.dts
index d0e0f57eb432..53f40885c530 100644
--- a/arch/arm/boot/dts/imx53-m53evk.dts
+++ b/arch/arm/boot/dts/imx53-m53evk.dts
@@ -124,8 +124,8 @@
124&esdhc1 { 124&esdhc1 {
125 pinctrl-names = "default"; 125 pinctrl-names = "default";
126 pinctrl-0 = <&pinctrl_esdhc1>; 126 pinctrl-0 = <&pinctrl_esdhc1>;
127 cd-gpios = <&gpio1 1 0>; 127 cd-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
128 wp-gpios = <&gpio1 9 0>; 128 wp-gpios = <&gpio1 9 GPIO_ACTIVE_HIGH>;
129 status = "okay"; 129 status = "okay";
130}; 130};
131 131
diff --git a/arch/arm/boot/dts/imx53-qsb-common.dtsi b/arch/arm/boot/dts/imx53-qsb-common.dtsi
index 181ae5ebf23f..b0d5542ac829 100644
--- a/arch/arm/boot/dts/imx53-qsb-common.dtsi
+++ b/arch/arm/boot/dts/imx53-qsb-common.dtsi
@@ -147,8 +147,8 @@
147&esdhc3 { 147&esdhc3 {
148 pinctrl-names = "default"; 148 pinctrl-names = "default";
149 pinctrl-0 = <&pinctrl_esdhc3>; 149 pinctrl-0 = <&pinctrl_esdhc3>;
150 cd-gpios = <&gpio3 11 0>; 150 cd-gpios = <&gpio3 11 GPIO_ACTIVE_LOW>;
151 wp-gpios = <&gpio3 12 0>; 151 wp-gpios = <&gpio3 12 GPIO_ACTIVE_HIGH>;
152 bus-width = <8>; 152 bus-width = <8>;
153 status = "okay"; 153 status = "okay";
154}; 154};
@@ -295,9 +295,10 @@
295&tve { 295&tve {
296 pinctrl-names = "default"; 296 pinctrl-names = "default";
297 pinctrl-0 = <&pinctrl_vga_sync>; 297 pinctrl-0 = <&pinctrl_vga_sync>;
298 ddc-i2c-bus = <&i2c2>;
298 fsl,tve-mode = "vga"; 299 fsl,tve-mode = "vga";
299 fsl,hsync-pin = <4>; 300 fsl,hsync-pin = <7>; /* IPU DI1 PIN7 via EIM_OE */
300 fsl,vsync-pin = <6>; 301 fsl,vsync-pin = <8>; /* IPU DI1 PIN8 via EIM_RW */
301 status = "okay"; 302 status = "okay";
302}; 303};
303 304
diff --git a/arch/arm/boot/dts/imx53-smd.dts b/arch/arm/boot/dts/imx53-smd.dts
index 1d325576bcc0..fc89ce1e5763 100644
--- a/arch/arm/boot/dts/imx53-smd.dts
+++ b/arch/arm/boot/dts/imx53-smd.dts
@@ -41,8 +41,8 @@
41&esdhc1 { 41&esdhc1 {
42 pinctrl-names = "default"; 42 pinctrl-names = "default";
43 pinctrl-0 = <&pinctrl_esdhc1>; 43 pinctrl-0 = <&pinctrl_esdhc1>;
44 cd-gpios = <&gpio3 13 0>; 44 cd-gpios = <&gpio3 13 GPIO_ACTIVE_LOW>;
45 wp-gpios = <&gpio4 11 0>; 45 wp-gpios = <&gpio4 11 GPIO_ACTIVE_HIGH>;
46 status = "okay"; 46 status = "okay";
47}; 47};
48 48
diff --git a/arch/arm/boot/dts/imx53-tqma53.dtsi b/arch/arm/boot/dts/imx53-tqma53.dtsi
index 4f1f0e2868bf..e03373a58760 100644
--- a/arch/arm/boot/dts/imx53-tqma53.dtsi
+++ b/arch/arm/boot/dts/imx53-tqma53.dtsi
@@ -41,8 +41,8 @@
41 pinctrl-0 = <&pinctrl_esdhc2>, 41 pinctrl-0 = <&pinctrl_esdhc2>,
42 <&pinctrl_esdhc2_cdwp>; 42 <&pinctrl_esdhc2_cdwp>;
43 vmmc-supply = <&reg_3p3v>; 43 vmmc-supply = <&reg_3p3v>;
44 wp-gpios = <&gpio1 2 0>; 44 wp-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
45 cd-gpios = <&gpio1 4 0>; 45 cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
46 status = "disabled"; 46 status = "disabled";
47}; 47};
48 48
diff --git a/arch/arm/boot/dts/imx53-tx53.dtsi b/arch/arm/boot/dts/imx53-tx53.dtsi
index 704bd72cbfec..d3e50b22064f 100644
--- a/arch/arm/boot/dts/imx53-tx53.dtsi
+++ b/arch/arm/boot/dts/imx53-tx53.dtsi
@@ -183,7 +183,7 @@
183}; 183};
184 184
185&esdhc1 { 185&esdhc1 {
186 cd-gpios = <&gpio3 24 GPIO_ACTIVE_HIGH>; 186 cd-gpios = <&gpio3 24 GPIO_ACTIVE_LOW>;
187 fsl,wp-controller; 187 fsl,wp-controller;
188 pinctrl-names = "default"; 188 pinctrl-names = "default";
189 pinctrl-0 = <&pinctrl_esdhc1>; 189 pinctrl-0 = <&pinctrl_esdhc1>;
@@ -191,7 +191,7 @@
191}; 191};
192 192
193&esdhc2 { 193&esdhc2 {
194 cd-gpios = <&gpio3 25 GPIO_ACTIVE_HIGH>; 194 cd-gpios = <&gpio3 25 GPIO_ACTIVE_LOW>;
195 fsl,wp-controller; 195 fsl,wp-controller;
196 pinctrl-names = "default"; 196 pinctrl-names = "default";
197 pinctrl-0 = <&pinctrl_esdhc2>; 197 pinctrl-0 = <&pinctrl_esdhc2>;
diff --git a/arch/arm/boot/dts/imx53-voipac-bsb.dts b/arch/arm/boot/dts/imx53-voipac-bsb.dts
index c17d3ad6dba5..fc51b87ad208 100644
--- a/arch/arm/boot/dts/imx53-voipac-bsb.dts
+++ b/arch/arm/boot/dts/imx53-voipac-bsb.dts
@@ -119,8 +119,8 @@
119&esdhc2 { 119&esdhc2 {
120 pinctrl-names = "default"; 120 pinctrl-names = "default";
121 pinctrl-0 = <&pinctrl_esdhc2>; 121 pinctrl-0 = <&pinctrl_esdhc2>;
122 cd-gpios = <&gpio3 25 0>; 122 cd-gpios = <&gpio3 25 GPIO_ACTIVE_LOW>;
123 wp-gpios = <&gpio2 19 0>; 123 wp-gpios = <&gpio2 19 GPIO_ACTIVE_HIGH>;
124 vmmc-supply = <&reg_3p3v>; 124 vmmc-supply = <&reg_3p3v>;
125 status = "okay"; 125 status = "okay";
126}; 126};
diff --git a/arch/arm/boot/dts/imx6dl-riotboard.dts b/arch/arm/boot/dts/imx6dl-riotboard.dts
index 43cb3fd76be7..5111f5170d53 100644
--- a/arch/arm/boot/dts/imx6dl-riotboard.dts
+++ b/arch/arm/boot/dts/imx6dl-riotboard.dts
@@ -305,8 +305,8 @@
305&usdhc2 { 305&usdhc2 {
306 pinctrl-names = "default"; 306 pinctrl-names = "default";
307 pinctrl-0 = <&pinctrl_usdhc2>; 307 pinctrl-0 = <&pinctrl_usdhc2>;
308 cd-gpios = <&gpio1 4 0>; 308 cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
309 wp-gpios = <&gpio1 2 0>; 309 wp-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
310 vmmc-supply = <&reg_3p3v>; 310 vmmc-supply = <&reg_3p3v>;
311 status = "okay"; 311 status = "okay";
312}; 312};
@@ -314,8 +314,8 @@
314&usdhc3 { 314&usdhc3 {
315 pinctrl-names = "default"; 315 pinctrl-names = "default";
316 pinctrl-0 = <&pinctrl_usdhc3>; 316 pinctrl-0 = <&pinctrl_usdhc3>;
317 cd-gpios = <&gpio7 0 0>; 317 cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
318 wp-gpios = <&gpio7 1 0>; 318 wp-gpios = <&gpio7 1 GPIO_ACTIVE_HIGH>;
319 vmmc-supply = <&reg_3p3v>; 319 vmmc-supply = <&reg_3p3v>;
320 status = "okay"; 320 status = "okay";
321}; 321};
diff --git a/arch/arm/boot/dts/imx6q-arm2.dts b/arch/arm/boot/dts/imx6q-arm2.dts
index 78df05e9d1ce..d6515f7a56c4 100644
--- a/arch/arm/boot/dts/imx6q-arm2.dts
+++ b/arch/arm/boot/dts/imx6q-arm2.dts
@@ -11,6 +11,7 @@
11 */ 11 */
12 12
13/dts-v1/; 13/dts-v1/;
14#include <dt-bindings/gpio/gpio.h>
14#include "imx6q.dtsi" 15#include "imx6q.dtsi"
15 16
16/ { 17/ {
@@ -196,8 +197,8 @@
196}; 197};
197 198
198&usdhc3 { 199&usdhc3 {
199 cd-gpios = <&gpio6 11 0>; 200 cd-gpios = <&gpio6 11 GPIO_ACTIVE_LOW>;
200 wp-gpios = <&gpio6 14 0>; 201 wp-gpios = <&gpio6 14 GPIO_ACTIVE_HIGH>;
201 vmmc-supply = <&reg_3p3v>; 202 vmmc-supply = <&reg_3p3v>;
202 pinctrl-names = "default"; 203 pinctrl-names = "default";
203 pinctrl-0 = <&pinctrl_usdhc3 204 pinctrl-0 = <&pinctrl_usdhc3
diff --git a/arch/arm/boot/dts/imx6q-gk802.dts b/arch/arm/boot/dts/imx6q-gk802.dts
index 703539cf36d3..00bd63e63d0c 100644
--- a/arch/arm/boot/dts/imx6q-gk802.dts
+++ b/arch/arm/boot/dts/imx6q-gk802.dts
@@ -7,6 +7,7 @@
7 */ 7 */
8 8
9/dts-v1/; 9/dts-v1/;
10#include <dt-bindings/gpio/gpio.h>
10#include "imx6q.dtsi" 11#include "imx6q.dtsi"
11 12
12/ { 13/ {
@@ -161,7 +162,7 @@
161 pinctrl-names = "default"; 162 pinctrl-names = "default";
162 pinctrl-0 = <&pinctrl_usdhc3>; 163 pinctrl-0 = <&pinctrl_usdhc3>;
163 bus-width = <4>; 164 bus-width = <4>;
164 cd-gpios = <&gpio6 11 0>; 165 cd-gpios = <&gpio6 11 GPIO_ACTIVE_LOW>;
165 vmmc-supply = <&reg_3p3v>; 166 vmmc-supply = <&reg_3p3v>;
166 status = "okay"; 167 status = "okay";
167}; 168};
diff --git a/arch/arm/boot/dts/imx6q-tbs2910.dts b/arch/arm/boot/dts/imx6q-tbs2910.dts
index a43abfa21e33..5645d52850a7 100644
--- a/arch/arm/boot/dts/imx6q-tbs2910.dts
+++ b/arch/arm/boot/dts/imx6q-tbs2910.dts
@@ -251,7 +251,7 @@
251 pinctrl-names = "default"; 251 pinctrl-names = "default";
252 pinctrl-0 = <&pinctrl_usdhc2>; 252 pinctrl-0 = <&pinctrl_usdhc2>;
253 bus-width = <4>; 253 bus-width = <4>;
254 cd-gpios = <&gpio2 2 GPIO_ACTIVE_HIGH>; 254 cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>;
255 vmmc-supply = <&reg_3p3v>; 255 vmmc-supply = <&reg_3p3v>;
256 status = "okay"; 256 status = "okay";
257}; 257};
@@ -260,7 +260,7 @@
260 pinctrl-names = "default"; 260 pinctrl-names = "default";
261 pinctrl-0 = <&pinctrl_usdhc3>; 261 pinctrl-0 = <&pinctrl_usdhc3>;
262 bus-width = <4>; 262 bus-width = <4>;
263 cd-gpios = <&gpio2 0 GPIO_ACTIVE_HIGH>; 263 cd-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>;
264 wp-gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>; 264 wp-gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>;
265 vmmc-supply = <&reg_3p3v>; 265 vmmc-supply = <&reg_3p3v>;
266 status = "okay"; 266 status = "okay";
diff --git a/arch/arm/boot/dts/imx6qdl-aristainetos.dtsi b/arch/arm/boot/dts/imx6qdl-aristainetos.dtsi
index e6d9195a1da7..f4d6ae564ead 100644
--- a/arch/arm/boot/dts/imx6qdl-aristainetos.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-aristainetos.dtsi
@@ -173,7 +173,7 @@
173 pinctrl-names = "default"; 173 pinctrl-names = "default";
174 pinctrl-0 = <&pinctrl_usdhc1>; 174 pinctrl-0 = <&pinctrl_usdhc1>;
175 vmmc-supply = <&reg_3p3v>; 175 vmmc-supply = <&reg_3p3v>;
176 cd-gpios = <&gpio4 7 GPIO_ACTIVE_HIGH>; 176 cd-gpios = <&gpio4 7 GPIO_ACTIVE_LOW>;
177 status = "okay"; 177 status = "okay";
178}; 178};
179 179
@@ -181,7 +181,7 @@
181 pinctrl-names = "default"; 181 pinctrl-names = "default";
182 pinctrl-0 = <&pinctrl_usdhc2>; 182 pinctrl-0 = <&pinctrl_usdhc2>;
183 vmmc-supply = <&reg_3p3v>; 183 vmmc-supply = <&reg_3p3v>;
184 cd-gpios = <&gpio4 8 GPIO_ACTIVE_HIGH>; 184 cd-gpios = <&gpio4 8 GPIO_ACTIVE_LOW>;
185 status = "okay"; 185 status = "okay";
186}; 186};
187 187
diff --git a/arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi b/arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi
index 1d85de2befb3..a47a0399a172 100644
--- a/arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi
@@ -392,7 +392,7 @@
392&usdhc1 { 392&usdhc1 {
393 pinctrl-names = "default"; 393 pinctrl-names = "default";
394 pinctrl-0 = <&pinctrl_usdhc1>; 394 pinctrl-0 = <&pinctrl_usdhc1>;
395 cd-gpios = <&gpio1 27 GPIO_ACTIVE_HIGH>; 395 cd-gpios = <&gpio1 27 GPIO_ACTIVE_LOW>;
396 no-1-8-v; 396 no-1-8-v;
397 status = "okay"; 397 status = "okay";
398}; 398};
@@ -400,7 +400,7 @@
400&usdhc2 { 400&usdhc2 {
401 pinctrl-names = "default"; 401 pinctrl-names = "default";
402 pinctrl-0 = <&pinctrl_usdhc2>; 402 pinctrl-0 = <&pinctrl_usdhc2>;
403 cd-gpios = <&gpio4 5 GPIO_ACTIVE_HIGH>; 403 cd-gpios = <&gpio4 5 GPIO_ACTIVE_LOW>;
404 wp-gpios = <&gpio2 10 GPIO_ACTIVE_HIGH>; 404 wp-gpios = <&gpio2 10 GPIO_ACTIVE_HIGH>;
405 no-1-8-v; 405 no-1-8-v;
406 status = "okay"; 406 status = "okay";
diff --git a/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi b/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
index 59e5d15e3ec4..ff41f83551de 100644
--- a/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
@@ -258,6 +258,6 @@
258 pinctrl-names = "default"; 258 pinctrl-names = "default";
259 pinctrl-0 = <&pinctrl_cubox_i_usdhc2_aux &pinctrl_cubox_i_usdhc2>; 259 pinctrl-0 = <&pinctrl_cubox_i_usdhc2_aux &pinctrl_cubox_i_usdhc2>;
260 vmmc-supply = <&reg_3p3v>; 260 vmmc-supply = <&reg_3p3v>;
261 cd-gpios = <&gpio1 4 0>; 261 cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
262 status = "okay"; 262 status = "okay";
263}; 263};
diff --git a/arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi b/arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi
index 2c253d6d20bd..45e7c39e80d5 100644
--- a/arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi
@@ -1,3 +1,5 @@
1#include <dt-bindings/gpio/gpio.h>
2
1/ { 3/ {
2 regulators { 4 regulators {
3 compatible = "simple-bus"; 5 compatible = "simple-bus";
@@ -181,7 +183,7 @@
181&usdhc2 { /* module slot */ 183&usdhc2 { /* module slot */
182 pinctrl-names = "default"; 184 pinctrl-names = "default";
183 pinctrl-0 = <&pinctrl_usdhc2>; 185 pinctrl-0 = <&pinctrl_usdhc2>;
184 cd-gpios = <&gpio2 2 0>; 186 cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>;
185 status = "okay"; 187 status = "okay";
186}; 188};
187 189
diff --git a/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
index b5756c21ea1d..4493f6e99330 100644
--- a/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
@@ -318,7 +318,7 @@
318&usdhc3 { 318&usdhc3 {
319 pinctrl-names = "default"; 319 pinctrl-names = "default";
320 pinctrl-0 = <&pinctrl_usdhc3>; 320 pinctrl-0 = <&pinctrl_usdhc3>;
321 cd-gpios = <&gpio7 0 GPIO_ACTIVE_HIGH>; 321 cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
322 vmmc-supply = <&reg_3p3v>; 322 vmmc-supply = <&reg_3p3v>;
323 status = "okay"; 323 status = "okay";
324}; 324};
diff --git a/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
index 86f03c1b147c..a857d1294609 100644
--- a/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
@@ -324,7 +324,7 @@
324&usdhc3 { 324&usdhc3 {
325 pinctrl-names = "default"; 325 pinctrl-names = "default";
326 pinctrl-0 = <&pinctrl_usdhc3>; 326 pinctrl-0 = <&pinctrl_usdhc3>;
327 cd-gpios = <&gpio7 0 GPIO_ACTIVE_HIGH>; 327 cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
328 vmmc-supply = <&reg_3p3v>; 328 vmmc-supply = <&reg_3p3v>;
329 status = "okay"; 329 status = "okay";
330}; 330};
diff --git a/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
index 4a8d97f47759..1afe3385e2d2 100644
--- a/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
@@ -417,7 +417,7 @@
417&usdhc3 { 417&usdhc3 {
418 pinctrl-names = "default"; 418 pinctrl-names = "default";
419 pinctrl-0 = <&pinctrl_usdhc3>; 419 pinctrl-0 = <&pinctrl_usdhc3>;
420 cd-gpios = <&gpio7 0 GPIO_ACTIVE_HIGH>; 420 cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
421 vmmc-supply = <&reg_3p3v>; 421 vmmc-supply = <&reg_3p3v>;
422 status = "okay"; 422 status = "okay";
423}; 423};
diff --git a/arch/arm/boot/dts/imx6qdl-hummingboard.dtsi b/arch/arm/boot/dts/imx6qdl-hummingboard.dtsi
index 62a82f3eba88..6dd0b764e036 100644
--- a/arch/arm/boot/dts/imx6qdl-hummingboard.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-hummingboard.dtsi
@@ -299,6 +299,6 @@
299 &pinctrl_hummingboard_usdhc2 299 &pinctrl_hummingboard_usdhc2
300 >; 300 >;
301 vmmc-supply = <&reg_3p3v>; 301 vmmc-supply = <&reg_3p3v>;
302 cd-gpios = <&gpio1 4 0>; 302 cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
303 status = "okay"; 303 status = "okay";
304}; 304};
diff --git a/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi b/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi
index 3af16dfe417b..d7fe6672d00c 100644
--- a/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi
@@ -453,7 +453,7 @@
453&usdhc3 { 453&usdhc3 {
454 pinctrl-names = "default"; 454 pinctrl-names = "default";
455 pinctrl-0 = <&pinctrl_usdhc3>; 455 pinctrl-0 = <&pinctrl_usdhc3>;
456 cd-gpios = <&gpio7 0 0>; 456 cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
457 vmmc-supply = <&reg_3p3v>; 457 vmmc-supply = <&reg_3p3v>;
458 status = "okay"; 458 status = "okay";
459}; 459};
@@ -461,7 +461,7 @@
461&usdhc4 { 461&usdhc4 {
462 pinctrl-names = "default"; 462 pinctrl-names = "default";
463 pinctrl-0 = <&pinctrl_usdhc4>; 463 pinctrl-0 = <&pinctrl_usdhc4>;
464 cd-gpios = <&gpio2 6 0>; 464 cd-gpios = <&gpio2 6 GPIO_ACTIVE_LOW>;
465 vmmc-supply = <&reg_3p3v>; 465 vmmc-supply = <&reg_3p3v>;
466 status = "okay"; 466 status = "okay";
467}; 467};
diff --git a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
index 1ce6133b67f5..9e6ecd99b472 100644
--- a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
@@ -409,8 +409,8 @@
409&usdhc2 { 409&usdhc2 {
410 pinctrl-names = "default"; 410 pinctrl-names = "default";
411 pinctrl-0 = <&pinctrl_usdhc2>; 411 pinctrl-0 = <&pinctrl_usdhc2>;
412 cd-gpios = <&gpio1 4 0>; 412 cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
413 wp-gpios = <&gpio1 2 0>; 413 wp-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
414 status = "disabled"; 414 status = "disabled";
415}; 415};
416 416
@@ -418,7 +418,7 @@
418 pinctrl-names = "default"; 418 pinctrl-names = "default";
419 pinctrl-0 = <&pinctrl_usdhc3 419 pinctrl-0 = <&pinctrl_usdhc3
420 &pinctrl_usdhc3_cdwp>; 420 &pinctrl_usdhc3_cdwp>;
421 cd-gpios = <&gpio1 27 0>; 421 cd-gpios = <&gpio1 27 GPIO_ACTIVE_LOW>;
422 wp-gpios = <&gpio1 29 0>; 422 wp-gpios = <&gpio1 29 GPIO_ACTIVE_HIGH>;
423 status = "disabled"; 423 status = "disabled";
424}; 424};
diff --git a/arch/arm/boot/dts/imx6qdl-rex.dtsi b/arch/arm/boot/dts/imx6qdl-rex.dtsi
index 488a640796ac..3373fd958e95 100644
--- a/arch/arm/boot/dts/imx6qdl-rex.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-rex.dtsi
@@ -342,7 +342,7 @@
342 pinctrl-0 = <&pinctrl_usdhc2>; 342 pinctrl-0 = <&pinctrl_usdhc2>;
343 bus-width = <4>; 343 bus-width = <4>;
344 cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>; 344 cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>;
345 wp-gpios = <&gpio2 3 GPIO_ACTIVE_LOW>; 345 wp-gpios = <&gpio2 3 GPIO_ACTIVE_HIGH>;
346 status = "okay"; 346 status = "okay";
347}; 347};
348 348
@@ -351,6 +351,6 @@
351 pinctrl-0 = <&pinctrl_usdhc3>; 351 pinctrl-0 = <&pinctrl_usdhc3>;
352 bus-width = <4>; 352 bus-width = <4>;
353 cd-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>; 353 cd-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>;
354 wp-gpios = <&gpio2 1 GPIO_ACTIVE_LOW>; 354 wp-gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>;
355 status = "okay"; 355 status = "okay";
356}; 356};
diff --git a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
index 3b24b12651b2..e329ca5c3322 100644
--- a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
@@ -467,8 +467,8 @@
467 pinctrl-0 = <&pinctrl_usdhc3>; 467 pinctrl-0 = <&pinctrl_usdhc3>;
468 pinctrl-1 = <&pinctrl_usdhc3_100mhz>; 468 pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
469 pinctrl-2 = <&pinctrl_usdhc3_200mhz>; 469 pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
470 cd-gpios = <&gpio6 15 0>; 470 cd-gpios = <&gpio6 15 GPIO_ACTIVE_LOW>;
471 wp-gpios = <&gpio1 13 0>; 471 wp-gpios = <&gpio1 13 GPIO_ACTIVE_HIGH>;
472 status = "okay"; 472 status = "okay";
473}; 473};
474 474
diff --git a/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi b/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi
index e00c44f6a0df..782379320517 100644
--- a/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi
@@ -448,8 +448,8 @@
448&usdhc3 { 448&usdhc3 {
449 pinctrl-names = "default"; 449 pinctrl-names = "default";
450 pinctrl-0 = <&pinctrl_usdhc3>; 450 pinctrl-0 = <&pinctrl_usdhc3>;
451 cd-gpios = <&gpio7 0 0>; 451 cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
452 wp-gpios = <&gpio7 1 0>; 452 wp-gpios = <&gpio7 1 GPIO_ACTIVE_HIGH>;
453 vmmc-supply = <&reg_3p3v>; 453 vmmc-supply = <&reg_3p3v>;
454 status = "okay"; 454 status = "okay";
455}; 455};
@@ -457,7 +457,7 @@
457&usdhc4 { 457&usdhc4 {
458 pinctrl-names = "default"; 458 pinctrl-names = "default";
459 pinctrl-0 = <&pinctrl_usdhc4>; 459 pinctrl-0 = <&pinctrl_usdhc4>;
460 cd-gpios = <&gpio2 6 0>; 460 cd-gpios = <&gpio2 6 GPIO_ACTIVE_LOW>;
461 vmmc-supply = <&reg_3p3v>; 461 vmmc-supply = <&reg_3p3v>;
462 status = "okay"; 462 status = "okay";
463}; 463};
diff --git a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
index a626e6dd8022..944eb81cb2b8 100644
--- a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
@@ -562,8 +562,8 @@
562 pinctrl-names = "default"; 562 pinctrl-names = "default";
563 pinctrl-0 = <&pinctrl_usdhc2>; 563 pinctrl-0 = <&pinctrl_usdhc2>;
564 bus-width = <8>; 564 bus-width = <8>;
565 cd-gpios = <&gpio2 2 0>; 565 cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>;
566 wp-gpios = <&gpio2 3 0>; 566 wp-gpios = <&gpio2 3 GPIO_ACTIVE_HIGH>;
567 status = "okay"; 567 status = "okay";
568}; 568};
569 569
@@ -571,8 +571,8 @@
571 pinctrl-names = "default"; 571 pinctrl-names = "default";
572 pinctrl-0 = <&pinctrl_usdhc3>; 572 pinctrl-0 = <&pinctrl_usdhc3>;
573 bus-width = <8>; 573 bus-width = <8>;
574 cd-gpios = <&gpio2 0 0>; 574 cd-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>;
575 wp-gpios = <&gpio2 1 0>; 575 wp-gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>;
576 status = "okay"; 576 status = "okay";
577}; 577};
578 578
diff --git a/arch/arm/boot/dts/imx6qdl-tx6.dtsi b/arch/arm/boot/dts/imx6qdl-tx6.dtsi
index f02b80b41d4f..da08de324e9e 100644
--- a/arch/arm/boot/dts/imx6qdl-tx6.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-tx6.dtsi
@@ -680,7 +680,7 @@
680 pinctrl-0 = <&pinctrl_usdhc1>; 680 pinctrl-0 = <&pinctrl_usdhc1>;
681 bus-width = <4>; 681 bus-width = <4>;
682 no-1-8-v; 682 no-1-8-v;
683 cd-gpios = <&gpio7 2 0>; 683 cd-gpios = <&gpio7 2 GPIO_ACTIVE_LOW>;
684 fsl,wp-controller; 684 fsl,wp-controller;
685 status = "okay"; 685 status = "okay";
686}; 686};
@@ -690,7 +690,7 @@
690 pinctrl-0 = <&pinctrl_usdhc2>; 690 pinctrl-0 = <&pinctrl_usdhc2>;
691 bus-width = <4>; 691 bus-width = <4>;
692 no-1-8-v; 692 no-1-8-v;
693 cd-gpios = <&gpio7 3 0>; 693 cd-gpios = <&gpio7 3 GPIO_ACTIVE_LOW>;
694 fsl,wp-controller; 694 fsl,wp-controller;
695 status = "okay"; 695 status = "okay";
696}; 696};
diff --git a/arch/arm/boot/dts/imx6qdl-wandboard.dtsi b/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
index 5fb091675582..9e096d811bed 100644
--- a/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
@@ -9,6 +9,8 @@
9 * 9 *
10 */ 10 */
11 11
12#include <dt-bindings/gpio/gpio.h>
13
12/ { 14/ {
13 regulators { 15 regulators {
14 compatible = "simple-bus"; 16 compatible = "simple-bus";
@@ -250,13 +252,13 @@
250&usdhc1 { 252&usdhc1 {
251 pinctrl-names = "default"; 253 pinctrl-names = "default";
252 pinctrl-0 = <&pinctrl_usdhc1>; 254 pinctrl-0 = <&pinctrl_usdhc1>;
253 cd-gpios = <&gpio1 2 0>; 255 cd-gpios = <&gpio1 2 GPIO_ACTIVE_LOW>;
254 status = "okay"; 256 status = "okay";
255}; 257};
256 258
257&usdhc3 { 259&usdhc3 {
258 pinctrl-names = "default"; 260 pinctrl-names = "default";
259 pinctrl-0 = <&pinctrl_usdhc3>; 261 pinctrl-0 = <&pinctrl_usdhc3>;
260 cd-gpios = <&gpio3 9 0>; 262 cd-gpios = <&gpio3 9 GPIO_ACTIVE_LOW>;
261 status = "okay"; 263 status = "okay";
262}; 264};
diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi
index e6d13592080d..b57033e8c633 100644
--- a/arch/arm/boot/dts/imx6qdl.dtsi
+++ b/arch/arm/boot/dts/imx6qdl.dtsi
@@ -181,10 +181,10 @@
181 interrupt-names = "msi"; 181 interrupt-names = "msi";
182 #interrupt-cells = <1>; 182 #interrupt-cells = <1>;
183 interrupt-map-mask = <0 0 0 0x7>; 183 interrupt-map-mask = <0 0 0 0x7>;
184 interrupt-map = <0 0 0 1 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>, 184 interrupt-map = <0 0 0 1 &gpc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
185 <0 0 0 2 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>, 185 <0 0 0 2 &gpc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
186 <0 0 0 3 &intc GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>, 186 <0 0 0 3 &gpc GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
187 <0 0 0 4 &intc GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>; 187 <0 0 0 4 &gpc GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
188 clocks = <&clks IMX6QDL_CLK_PCIE_AXI>, 188 clocks = <&clks IMX6QDL_CLK_PCIE_AXI>,
189 <&clks IMX6QDL_CLK_LVDS1_GATE>, 189 <&clks IMX6QDL_CLK_LVDS1_GATE>,
190 <&clks IMX6QDL_CLK_PCIE_REF_125M>; 190 <&clks IMX6QDL_CLK_PCIE_REF_125M>;
diff --git a/arch/arm/boot/dts/imx6sl-evk.dts b/arch/arm/boot/dts/imx6sl-evk.dts
index 945887d3fdb3..b84dff2e94ea 100644
--- a/arch/arm/boot/dts/imx6sl-evk.dts
+++ b/arch/arm/boot/dts/imx6sl-evk.dts
@@ -617,8 +617,8 @@
 	pinctrl-1 = <&pinctrl_usdhc1_100mhz>;
 	pinctrl-2 = <&pinctrl_usdhc1_200mhz>;
 	bus-width = <8>;
-	cd-gpios = <&gpio4 7 0>;
-	wp-gpios = <&gpio4 6 0>;
+	cd-gpios = <&gpio4 7 GPIO_ACTIVE_LOW>;
+	wp-gpios = <&gpio4 6 GPIO_ACTIVE_HIGH>;
 	status = "okay";
 };
 
@@ -627,8 +627,8 @@
 	pinctrl-0 = <&pinctrl_usdhc2>;
 	pinctrl-1 = <&pinctrl_usdhc2_100mhz>;
 	pinctrl-2 = <&pinctrl_usdhc2_200mhz>;
-	cd-gpios = <&gpio5 0 0>;
-	wp-gpios = <&gpio4 29 0>;
+	cd-gpios = <&gpio5 0 GPIO_ACTIVE_LOW>;
+	wp-gpios = <&gpio4 29 GPIO_ACTIVE_HIGH>;
 	status = "okay";
 };
 
@@ -637,6 +637,6 @@
 	pinctrl-0 = <&pinctrl_usdhc3>;
 	pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
 	pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
-	cd-gpios = <&gpio3 22 0>;
+	cd-gpios = <&gpio3 22 GPIO_ACTIVE_LOW>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx6sx-sabreauto.dts b/arch/arm/boot/dts/imx6sx-sabreauto.dts
index e3c0b63c2205..115f3fd78971 100644
--- a/arch/arm/boot/dts/imx6sx-sabreauto.dts
+++ b/arch/arm/boot/dts/imx6sx-sabreauto.dts
@@ -49,7 +49,7 @@
 	pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
 	pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
 	bus-width = <8>;
-	cd-gpios = <&gpio7 10 GPIO_ACTIVE_HIGH>;
+	cd-gpios = <&gpio7 10 GPIO_ACTIVE_LOW>;
 	wp-gpios = <&gpio3 19 GPIO_ACTIVE_HIGH>;
 	keep-power-in-suspend;
 	enable-sdio-wakeup;
@@ -61,7 +61,7 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc4>;
 	bus-width = <8>;
-	cd-gpios = <&gpio7 11 GPIO_ACTIVE_HIGH>;
+	cd-gpios = <&gpio7 11 GPIO_ACTIVE_LOW>;
 	no-1-8-v;
 	keep-power-in-suspend;
 	enable-sdio-wakup;
diff --git a/arch/arm/boot/dts/imx6sx-sdb.dtsi b/arch/arm/boot/dts/imx6sx-sdb.dtsi
index cef04cef3a80..ac88c3467078 100644
--- a/arch/arm/boot/dts/imx6sx-sdb.dtsi
+++ b/arch/arm/boot/dts/imx6sx-sdb.dtsi
@@ -293,7 +293,7 @@
 	pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
 	pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
 	bus-width = <8>;
-	cd-gpios = <&gpio2 10 GPIO_ACTIVE_HIGH>;
+	cd-gpios = <&gpio2 10 GPIO_ACTIVE_LOW>;
 	wp-gpios = <&gpio2 15 GPIO_ACTIVE_HIGH>;
 	keep-power-in-suspend;
 	enable-sdio-wakeup;
@@ -304,7 +304,7 @@
 &usdhc4 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc4>;
-	cd-gpios = <&gpio6 21 GPIO_ACTIVE_HIGH>;
+	cd-gpios = <&gpio6 21 GPIO_ACTIVE_LOW>;
 	wp-gpios = <&gpio6 20 GPIO_ACTIVE_HIGH>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx7d-sdb.dts b/arch/arm/boot/dts/imx7d-sdb.dts
index 4d1a4b977d84..fdd1d7c9a5cc 100644
--- a/arch/arm/boot/dts/imx7d-sdb.dts
+++ b/arch/arm/boot/dts/imx7d-sdb.dts
@@ -234,8 +234,8 @@
 &usdhc1 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc1>;
-	cd-gpios = <&gpio5 0 0>;
-	wp-gpios = <&gpio5 1 0>;
+	cd-gpios = <&gpio5 0 GPIO_ACTIVE_LOW>;
+	wp-gpios = <&gpio5 1 GPIO_ACTIVE_HIGH>;
 	enable-sdio-wakeup;
 	keep-power-in-suspend;
 	status = "okay";
diff --git a/arch/arm/boot/dts/k2e-clocks.dtsi b/arch/arm/boot/dts/k2e-clocks.dtsi
index 4773d6af66a0..d56d68fe7ffc 100644
--- a/arch/arm/boot/dts/k2e-clocks.dtsi
+++ b/arch/arm/boot/dts/k2e-clocks.dtsi
@@ -13,9 +13,8 @@ clocks {
 		#clock-cells = <0>;
 		compatible = "ti,keystone,main-pll-clock";
 		clocks = <&refclksys>;
-		reg = <0x02620350 4>, <0x02310110 4>;
-		reg-names = "control", "multiplier";
-		fixed-postdiv = <2>;
+		reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>;
+		reg-names = "control", "multiplier", "post-divider";
 	};
 
 	papllclk: papllclk@2620358 {
diff --git a/arch/arm/boot/dts/k2e.dtsi b/arch/arm/boot/dts/k2e.dtsi
index 50e555eab50d..675fb8e492c6 100644
--- a/arch/arm/boot/dts/k2e.dtsi
+++ b/arch/arm/boot/dts/k2e.dtsi
@@ -86,7 +86,7 @@
 			gpio,syscon-dev = <&devctrl 0x240>;
 		};
 
-		pcie@21020000 {
+		pcie1: pcie@21020000 {
 			compatible = "ti,keystone-pcie","snps,dw-pcie";
 			clocks = <&clkpcie1>;
 			clock-names = "pcie";
@@ -96,6 +96,7 @@
 			ranges = <0x81000000 0 0 0x23260000 0x4000 0x4000
 				  0x82000000 0 0x60000000 0x60000000 0 0x10000000>;
 
+			status = "disabled";
 			device_type = "pci";
 			num-lanes = <2>;
 
@@ -130,10 +131,17 @@
 				<GIC_SPI 376 IRQ_TYPE_EDGE_RISING>;
 			};
 		};
+
+		mdio: mdio@24200f00 {
+			compatible = "ti,keystone_mdio", "ti,davinci_mdio";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <0x24200f00 0x100>;
+			status = "disabled";
+			clocks = <&clkcpgmac>;
+			clock-names = "fck";
+			bus_freq = <2500000>;
+		};
 		/include/ "k2e-netcp.dtsi"
 	};
 };
-
-&mdio {
-	reg = <0x24200f00 0x100>;
-};
diff --git a/arch/arm/boot/dts/k2hk-clocks.dtsi b/arch/arm/boot/dts/k2hk-clocks.dtsi
index d5adee3c0067..af9b7190533a 100644
--- a/arch/arm/boot/dts/k2hk-clocks.dtsi
+++ b/arch/arm/boot/dts/k2hk-clocks.dtsi
@@ -22,9 +22,8 @@ clocks {
 		#clock-cells = <0>;
 		compatible = "ti,keystone,main-pll-clock";
 		clocks = <&refclksys>;
-		reg = <0x02620350 4>, <0x02310110 4>;
-		reg-names = "control", "multiplier";
-		fixed-postdiv = <2>;
+		reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>;
+		reg-names = "control", "multiplier", "post-divider";
 	};
 
 	papllclk: papllclk@2620358 {
diff --git a/arch/arm/boot/dts/k2hk.dtsi b/arch/arm/boot/dts/k2hk.dtsi
index ae6472407b22..d0810a5f2968 100644
--- a/arch/arm/boot/dts/k2hk.dtsi
+++ b/arch/arm/boot/dts/k2hk.dtsi
@@ -98,6 +98,17 @@
 			#gpio-cells = <2>;
 			gpio,syscon-dev = <&devctrl 0x25c>;
 		};
+
+		mdio: mdio@02090300 {
+			compatible = "ti,keystone_mdio", "ti,davinci_mdio";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <0x02090300 0x100>;
+			status = "disabled";
+			clocks = <&clkcpgmac>;
+			clock-names = "fck";
+			bus_freq = <2500000>;
+		};
 		/include/ "k2hk-netcp.dtsi"
 	};
 };
diff --git a/arch/arm/boot/dts/k2l-clocks.dtsi b/arch/arm/boot/dts/k2l-clocks.dtsi
index eb1e3e29f073..ef8464bb11ff 100644
--- a/arch/arm/boot/dts/k2l-clocks.dtsi
+++ b/arch/arm/boot/dts/k2l-clocks.dtsi
@@ -22,9 +22,8 @@ clocks {
 		#clock-cells = <0>;
 		compatible = "ti,keystone,main-pll-clock";
 		clocks = <&refclksys>;
-		reg = <0x02620350 4>, <0x02310110 4>;
-		reg-names = "control", "multiplier";
-		fixed-postdiv = <2>;
+		reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>;
+		reg-names = "control", "multiplier", "post-divider";
 	};
 
 	papllclk: papllclk@2620358 {
diff --git a/arch/arm/boot/dts/k2l.dtsi b/arch/arm/boot/dts/k2l.dtsi
index 0e007483615e..49fd414f680c 100644
--- a/arch/arm/boot/dts/k2l.dtsi
+++ b/arch/arm/boot/dts/k2l.dtsi
@@ -29,7 +29,6 @@
 	};
 
 	soc {
-
 		/include/ "k2l-clocks.dtsi"
 
 		uart2: serial@02348400 {
@@ -79,6 +78,17 @@
 			#gpio-cells = <2>;
 			gpio,syscon-dev = <&devctrl 0x24c>;
 		};
+
+		mdio: mdio@26200f00 {
+			compatible = "ti,keystone_mdio", "ti,davinci_mdio";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <0x26200f00 0x100>;
+			status = "disabled";
+			clocks = <&clkcpgmac>;
+			clock-names = "fck";
+			bus_freq = <2500000>;
+		};
 		/include/ "k2l-netcp.dtsi"
 	};
 };
@@ -96,7 +106,3 @@
 	/* Pin muxed. Enabled and configured by Bootloader */
 	status = "disabled";
 };
-
-&mdio {
-	reg = <0x26200f00 0x100>;
-};
diff --git a/arch/arm/boot/dts/keystone.dtsi b/arch/arm/boot/dts/keystone.dtsi
index c06542b2c954..72816d65f7ec 100644
--- a/arch/arm/boot/dts/keystone.dtsi
+++ b/arch/arm/boot/dts/keystone.dtsi
@@ -267,17 +267,6 @@
 				 1 0 0x21000A00 0x00000100>;
 		};
 
-		mdio: mdio@02090300 {
-			compatible = "ti,keystone_mdio", "ti,davinci_mdio";
-			#address-cells = <1>;
-			#size-cells = <0>;
-			reg = <0x02090300 0x100>;
-			status = "disabled";
-			clocks = <&clkpa>;
-			clock-names = "fck";
-			bus_freq = <2500000>;
-		};
-
 		kirq0: keystone_irq@26202a0 {
 			compatible = "ti,keystone-irq";
 			interrupts = <GIC_SPI 4 IRQ_TYPE_EDGE_RISING>;
@@ -286,7 +275,7 @@
 			ti,syscon-dev = <&devctrl 0x2a0>;
 		};
 
-		pcie@21800000 {
+		pcie0: pcie@21800000 {
 			compatible = "ti,keystone-pcie", "snps,dw-pcie";
 			clocks = <&clkpcie>;
 			clock-names = "pcie";
@@ -296,6 +285,7 @@
 			ranges = <0x81000000 0 0 0x23250000 0 0x4000
 				  0x82000000 0 0x50000000 0x50000000 0 0x10000000>;
 
+			status = "disabled";
 			device_type = "pci";
 			num-lanes = <2>;
 
diff --git a/arch/arm/boot/dts/omap2430.dtsi b/arch/arm/boot/dts/omap2430.dtsi
index 11a7963be003..2390f387c271 100644
--- a/arch/arm/boot/dts/omap2430.dtsi
+++ b/arch/arm/boot/dts/omap2430.dtsi
@@ -51,7 +51,8 @@
 		};
 
 		scm_conf: scm_conf@270 {
-			compatible = "syscon";
+			compatible = "syscon",
+				     "simple-bus";
 			reg = <0x270 0x240>;
 			#address-cells = <1>;
 			#size-cells = <1>;
diff --git a/arch/arm/boot/dts/omap3-overo-common-lcd35.dtsi b/arch/arm/boot/dts/omap3-overo-common-lcd35.dtsi
index 233c69e50ae3..df8908adb0cb 100644
--- a/arch/arm/boot/dts/omap3-overo-common-lcd35.dtsi
+++ b/arch/arm/boot/dts/omap3-overo-common-lcd35.dtsi
@@ -120,7 +120,7 @@
 
 		lcd0: display@0 {
 			compatible = "lgphilips,lb035q02";
-			label = "lcd";
+			label = "lcd35";
 
 			reg = <1>; /* CS1 */
 			spi-max-frequency = <10000000>;
diff --git a/arch/arm/boot/dts/omap3-overo-common-lcd43.dtsi b/arch/arm/boot/dts/omap3-overo-common-lcd43.dtsi
index f5395b7da912..048fd216970a 100644
--- a/arch/arm/boot/dts/omap3-overo-common-lcd43.dtsi
+++ b/arch/arm/boot/dts/omap3-overo-common-lcd43.dtsi
@@ -98,7 +98,7 @@
 
 		lcd0: display@0 {
 			compatible = "samsung,lte430wq-f0c", "panel-dpi";
-			label = "lcd";
+			label = "lcd43";
 
 			pinctrl-names = "default";
 			pinctrl-0 = <&lte430_pins>;
diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi
index f884d6adb71e..abc4473e6f8a 100644
--- a/arch/arm/boot/dts/omap4.dtsi
+++ b/arch/arm/boot/dts/omap4.dtsi
@@ -191,7 +191,8 @@
 		};
 
 		omap4_padconf_global: omap4_padconf_global@5a0 {
-			compatible = "syscon";
+			compatible = "syscon",
+				     "simple-bus";
 			reg = <0x5a0 0x170>;
 			#address-cells = <1>;
 			#size-cells = <1>;
@@ -551,6 +552,7 @@
 			reg = <0x4a066000 0x100>;
 			interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
 			ti,hwmods = "mmu_dsp";
+			#iommu-cells = <0>;
 		};
 
 		mmu_ipu: mmu@55082000 {
@@ -558,6 +560,7 @@
 			reg = <0x55082000 0x100>;
 			interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
 			ti,hwmods = "mmu_ipu";
+			#iommu-cells = <0>;
 			ti,iommu-bus-err-back;
 		};
 
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
index 7d24ae0306b5..b1a1263e6001 100644
--- a/arch/arm/boot/dts/omap5.dtsi
+++ b/arch/arm/boot/dts/omap5.dtsi
@@ -180,7 +180,8 @@
 		};
 
 		omap5_padconf_global: omap5_padconf_global@5a0 {
-			compatible = "syscon";
+			compatible = "syscon",
+				     "simple-bus";
 			reg = <0x5a0 0xec>;
 			#address-cells = <1>;
 			#size-cells = <1>;
@@ -612,6 +613,7 @@
 			reg = <0x4a066000 0x100>;
 			interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
 			ti,hwmods = "mmu_dsp";
+			#iommu-cells = <0>;
 		};
 
 		mmu_ipu: mmu@55082000 {
@@ -619,6 +621,7 @@
 			reg = <0x55082000 0x100>;
 			interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
 			ti,hwmods = "mmu_ipu";
+			#iommu-cells = <0>;
 			ti,iommu-bus-err-back;
 		};
 
diff --git a/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts b/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts
index 71468a7eb28f..5e17fd147728 100644
--- a/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts
+++ b/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts
@@ -60,27 +60,27 @@
 	rxc-skew-ps = <2000>;
 };
 
-&mmc0 {
-	vmmc-supply = <&regulator_3_3v>;
-	vqmmc-supply = <&regulator_3_3v>;
-};
-
-&usb1 {
-	status = "okay";
-};
-
 &gpio2 {
 	status = "okay";
 };
 
-&i2c1{
+&i2c1 {
 	status = "okay";
 
-	accel1: accel1@53{
-		compatible = "adxl34x";
+	accel1: accelerometer@53 {
+		compatible = "adi,adxl345";
 		reg = <0x53>;
 
-		interrupt-parent = < &portc >;
+		interrupt-parent = <&portc>;
 		interrupts = <3 2>;
 	};
 };
+
+&mmc0 {
+	vmmc-supply = <&regulator_3_3v>;
+	vqmmc-supply = <&regulator_3_3v>;
+};
+
+&usb1 {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/spear1310-evb.dts b/arch/arm/boot/dts/spear1310-evb.dts
index d42c84b1df8d..e48857249ce7 100644
--- a/arch/arm/boot/dts/spear1310-evb.dts
+++ b/arch/arm/boot/dts/spear1310-evb.dts
@@ -1,7 +1,7 @@
 /*
  * DTS file for SPEAr1310 Evaluation Baord
  *
- * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
+ * Copyright 2012 Viresh Kumar <vireshk@kernel.org>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
diff --git a/arch/arm/boot/dts/spear1310.dtsi b/arch/arm/boot/dts/spear1310.dtsi
index 9d342920695a..54bc6d3cf290 100644
--- a/arch/arm/boot/dts/spear1310.dtsi
+++ b/arch/arm/boot/dts/spear1310.dtsi
@@ -1,7 +1,7 @@
 /*
  * DTS file for all SPEAr1310 SoCs
  *
- * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
+ * Copyright 2012 Viresh Kumar <vireshk@kernel.org>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
diff --git a/arch/arm/boot/dts/spear1340-evb.dts b/arch/arm/boot/dts/spear1340-evb.dts
index b23e05ed1d60..c611f5606dfe 100644
--- a/arch/arm/boot/dts/spear1340-evb.dts
+++ b/arch/arm/boot/dts/spear1340-evb.dts
@@ -1,7 +1,7 @@
 /*
  * DTS file for SPEAr1340 Evaluation Baord
  *
- * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
+ * Copyright 2012 Viresh Kumar <vireshk@kernel.org>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
diff --git a/arch/arm/boot/dts/spear1340.dtsi b/arch/arm/boot/dts/spear1340.dtsi
index 13e1aa33daa2..df2232d767ed 100644
--- a/arch/arm/boot/dts/spear1340.dtsi
+++ b/arch/arm/boot/dts/spear1340.dtsi
@@ -1,7 +1,7 @@
 /*
  * DTS file for all SPEAr1340 SoCs
  *
- * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
+ * Copyright 2012 Viresh Kumar <vireshk@kernel.org>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
diff --git a/arch/arm/boot/dts/spear13xx.dtsi b/arch/arm/boot/dts/spear13xx.dtsi
index 40accc87e3a2..14594ce8c18a 100644
--- a/arch/arm/boot/dts/spear13xx.dtsi
+++ b/arch/arm/boot/dts/spear13xx.dtsi
@@ -1,7 +1,7 @@
 /*
  * DTS file for all SPEAr13xx SoCs
  *
- * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
+ * Copyright 2012 Viresh Kumar <vireshk@kernel.org>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
diff --git a/arch/arm/boot/dts/spear300-evb.dts b/arch/arm/boot/dts/spear300-evb.dts
index 5de1431653e4..e859e8288bcd 100644
--- a/arch/arm/boot/dts/spear300-evb.dts
+++ b/arch/arm/boot/dts/spear300-evb.dts
@@ -1,7 +1,7 @@
 /*
  * DTS file for SPEAr300 Evaluation Baord
  *
- * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
+ * Copyright 2012 Viresh Kumar <vireshk@kernel.org>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
diff --git a/arch/arm/boot/dts/spear300.dtsi b/arch/arm/boot/dts/spear300.dtsi
index f79b3dfaabe6..f4e92e599729 100644
--- a/arch/arm/boot/dts/spear300.dtsi
+++ b/arch/arm/boot/dts/spear300.dtsi
@@ -1,7 +1,7 @@
 /*
  * DTS file for SPEAr300 SoC
  *
- * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
+ * Copyright 2012 Viresh Kumar <vireshk@kernel.org>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
diff --git a/arch/arm/boot/dts/spear310-evb.dts b/arch/arm/boot/dts/spear310-evb.dts
index b09632963d15..070f2c1b7851 100644
--- a/arch/arm/boot/dts/spear310-evb.dts
+++ b/arch/arm/boot/dts/spear310-evb.dts
@@ -1,7 +1,7 @@
 /*
  * DTS file for SPEAr310 Evaluation Baord
  *
- * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
+ * Copyright 2012 Viresh Kumar <vireshk@kernel.org>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
diff --git a/arch/arm/boot/dts/spear310.dtsi b/arch/arm/boot/dts/spear310.dtsi
index 95372080eea6..da210b454753 100644
--- a/arch/arm/boot/dts/spear310.dtsi
+++ b/arch/arm/boot/dts/spear310.dtsi
@@ -1,7 +1,7 @@
 /*
  * DTS file for SPEAr310 SoC
  *
- * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
+ * Copyright 2012 Viresh Kumar <vireshk@kernel.org>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
diff --git a/arch/arm/boot/dts/spear320-evb.dts b/arch/arm/boot/dts/spear320-evb.dts
index fdedbb514102..1b1034477923 100644
--- a/arch/arm/boot/dts/spear320-evb.dts
+++ b/arch/arm/boot/dts/spear320-evb.dts
@@ -1,7 +1,7 @@
 /*
  * DTS file for SPEAr320 Evaluation Baord
  *
- * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
+ * Copyright 2012 Viresh Kumar <vireshk@kernel.org>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
diff --git a/arch/arm/boot/dts/spear320.dtsi b/arch/arm/boot/dts/spear320.dtsi
index ffea342aeec9..22be6e5edaac 100644
--- a/arch/arm/boot/dts/spear320.dtsi
+++ b/arch/arm/boot/dts/spear320.dtsi
@@ -1,7 +1,7 @@
 /*
  * DTS file for SPEAr320 SoC
  *
- * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
+ * Copyright 2012 Viresh Kumar <vireshk@kernel.org>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
diff --git a/arch/arm/boot/dts/spear3xx.dtsi b/arch/arm/boot/dts/spear3xx.dtsi
index f0e3fcf8e323..118135d75899 100644
--- a/arch/arm/boot/dts/spear3xx.dtsi
+++ b/arch/arm/boot/dts/spear3xx.dtsi
@@ -1,7 +1,7 @@
 /*
  * DTS file for all SPEAr3xx SoCs
  *
- * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
+ * Copyright 2012 Viresh Kumar <vireshk@kernel.org>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
diff --git a/arch/arm/boot/dts/ste-ccu8540.dts b/arch/arm/boot/dts/ste-ccu8540.dts
index 32dd55e5f4e6..6eaaf638e52e 100644
--- a/arch/arm/boot/dts/ste-ccu8540.dts
+++ b/arch/arm/boot/dts/ste-ccu8540.dts
@@ -17,6 +17,13 @@
 	model = "ST-Ericsson U8540 platform with Device Tree";
 	compatible = "st-ericsson,ccu8540", "st-ericsson,u8540";
 
+	/* This stablilizes the serial port enumeration */
+	aliases {
+		serial0 = &ux500_serial0;
+		serial1 = &ux500_serial1;
+		serial2 = &ux500_serial2;
+	};
+
 	memory@0 {
 		device_type = "memory";
 		reg = <0x20000000 0x1f000000>, <0xc0000000 0x3f000000>;
diff --git a/arch/arm/boot/dts/ste-ccu9540.dts b/arch/arm/boot/dts/ste-ccu9540.dts
index 651c56d400a4..c8b815819cfe 100644
--- a/arch/arm/boot/dts/ste-ccu9540.dts
+++ b/arch/arm/boot/dts/ste-ccu9540.dts
@@ -16,6 +16,13 @@
 	model = "ST-Ericsson CCU9540 platform with Device Tree";
 	compatible = "st-ericsson,ccu9540", "st-ericsson,u9540";
 
+	/* This stablilizes the serial port enumeration */
+	aliases {
+		serial0 = &ux500_serial0;
+		serial1 = &ux500_serial1;
+		serial2 = &ux500_serial2;
+	};
+
 	memory {
 		reg = <0x00000000 0x20000000>;
 	};
diff --git a/arch/arm/boot/dts/ste-dbx5x0.dtsi b/arch/arm/boot/dts/ste-dbx5x0.dtsi
index 853684ad7773..b8f81fb418ce 100644
--- a/arch/arm/boot/dts/ste-dbx5x0.dtsi
+++ b/arch/arm/boot/dts/ste-dbx5x0.dtsi
@@ -15,6 +15,33 @@
 #include "skeleton.dtsi"
 
 / {
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		enable-method = "ste,dbx500-smp";
+
+		cpu-map {
+			cluster0 {
+				core0 {
+					cpu = <&CPU0>;
+				};
+				core1 {
+					cpu = <&CPU1>;
+				};
+			};
+		};
+		CPU0: cpu@300 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a9";
+			reg = <0x300>;
+		};
+		CPU1: cpu@301 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a9";
+			reg = <0x301>;
+		};
+	};
+
 	soc {
 		#address-cells = <1>;
 		#size-cells = <1>;
@@ -22,32 +49,6 @@
 		interrupt-parent = <&intc>;
 		ranges;
 
-		cpus {
-			#address-cells = <1>;
-			#size-cells = <0>;
-
-			cpu-map {
-				cluster0 {
-					core0 {
-						cpu = <&CPU0>;
-					};
-					core1 {
-						cpu = <&CPU1>;
-					};
-				};
-			};
-			CPU0: cpu@0 {
-				device_type = "cpu";
-				compatible = "arm,cortex-a9";
-				reg = <0>;
-			};
-			CPU1: cpu@1 {
-				device_type = "cpu";
-				compatible = "arm,cortex-a9";
-				reg = <1>;
-			};
-		};
-
 		ptm@801ae000 {
 			compatible = "arm,coresight-etm3x", "arm,primecell";
 			reg = <0x801ae000 0x1000>;
@@ -971,7 +972,7 @@
 			power-domains = <&pm_domains DOMAIN_VAPE>;
 		};
 
-		uart@80120000 {
+		ux500_serial0: uart@80120000 {
 			compatible = "arm,pl011", "arm,primecell";
 			reg = <0x80120000 0x1000>;
 			interrupts = <0 11 IRQ_TYPE_LEVEL_HIGH>;
@@ -986,7 +987,7 @@
 			status = "disabled";
 		};
 
-		uart@80121000 {
+		ux500_serial1: uart@80121000 {
 			compatible = "arm,pl011", "arm,primecell";
 			reg = <0x80121000 0x1000>;
 			interrupts = <0 19 IRQ_TYPE_LEVEL_HIGH>;
@@ -1001,7 +1002,7 @@
 			status = "disabled";
 		};
 
-		uart@80007000 {
+		ux500_serial2: uart@80007000 {
 			compatible = "arm,pl011", "arm,primecell";
 			reg = <0x80007000 0x1000>;
 			interrupts = <0 26 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm/boot/dts/ste-href.dtsi b/arch/arm/boot/dts/ste-href.dtsi
index 744c1e3a744d..6d8ce154347e 100644
--- a/arch/arm/boot/dts/ste-href.dtsi
+++ b/arch/arm/boot/dts/ste-href.dtsi
@@ -32,11 +32,11 @@
 			status = "okay";
 		};
 
+		/* This UART is unused and thus left disabled */
 		uart@80121000 {
 			pinctrl-names = "default", "sleep";
 			pinctrl-0 = <&uart1_default_mode>;
 			pinctrl-1 = <&uart1_sleep_mode>;
-			status = "okay";
 		};
 
 		uart@80007000 {
diff --git a/arch/arm/boot/dts/ste-hrefprev60-stuib.dts b/arch/arm/boot/dts/ste-hrefprev60-stuib.dts
index 2b1cb5b584b6..18e9795a94f9 100644
--- a/arch/arm/boot/dts/ste-hrefprev60-stuib.dts
+++ b/arch/arm/boot/dts/ste-hrefprev60-stuib.dts
@@ -17,6 +17,13 @@
 	model = "ST-Ericsson HREF (pre-v60) and ST UIB";
 	compatible = "st-ericsson,mop500", "st-ericsson,u8500";
 
+	/* This stablilizes the serial port enumeration */
+	aliases {
+		serial0 = &ux500_serial0;
+		serial1 = &ux500_serial1;
+		serial2 = &ux500_serial2;
+	};
+
 	soc {
 		/* Reset line for the BU21013 touchscreen */
 		i2c@80110000 {
diff --git a/arch/arm/boot/dts/ste-hrefprev60-tvk.dts b/arch/arm/boot/dts/ste-hrefprev60-tvk.dts
index 59523f866812..24739914e689 100644
--- a/arch/arm/boot/dts/ste-hrefprev60-tvk.dts
+++ b/arch/arm/boot/dts/ste-hrefprev60-tvk.dts
@@ -16,4 +16,11 @@
 / {
 	model = "ST-Ericsson HREF (pre-v60) and TVK1281618 UIB";
 	compatible = "st-ericsson,mop500", "st-ericsson,u8500";
+
+	/* This stablilizes the serial port enumeration */
+	aliases {
+		serial0 = &ux500_serial0;
+		serial1 = &ux500_serial1;
+		serial2 = &ux500_serial2;
+	};
 };
diff --git a/arch/arm/boot/dts/ste-hrefprev60.dtsi b/arch/arm/boot/dts/ste-hrefprev60.dtsi
index 7f3975b58d16..b0278f4c486c 100644
--- a/arch/arm/boot/dts/ste-hrefprev60.dtsi
+++ b/arch/arm/boot/dts/ste-hrefprev60.dtsi
@@ -23,6 +23,11 @@
 	};
 
 	soc {
+		/* Enable UART1 on this board */
+		uart@80121000 {
+			status = "okay";
+		};
+
 		i2c@80004000 {
 			tps61052@33 {
 				compatible = "tps61052";
diff --git a/arch/arm/boot/dts/ste-hrefv60plus-stuib.dts b/arch/arm/boot/dts/ste-hrefv60plus-stuib.dts
index 8c6a2de56cf1..c2e1ba019a2f 100644
--- a/arch/arm/boot/dts/ste-hrefv60plus-stuib.dts
+++ b/arch/arm/boot/dts/ste-hrefv60plus-stuib.dts
@@ -19,6 +19,13 @@
 	model = "ST-Ericsson HREF (v60+) and ST UIB";
 	compatible = "st-ericsson,hrefv60+", "st-ericsson,u8500";
 
+	/* This stablilizes the serial port enumeration */
+	aliases {
+		serial0 = &ux500_serial0;
+		serial1 = &ux500_serial1;
+		serial2 = &ux500_serial2;
+	};
+
 	soc {
 		/* Reset line for the BU21013 touchscreen */
 		i2c@80110000 {
diff --git a/arch/arm/boot/dts/ste-hrefv60plus-tvk.dts b/arch/arm/boot/dts/ste-hrefv60plus-tvk.dts
index d53cccdce776..ebd8547e98f1 100644
--- a/arch/arm/boot/dts/ste-hrefv60plus-tvk.dts
+++ b/arch/arm/boot/dts/ste-hrefv60plus-tvk.dts
@@ -18,4 +18,11 @@
 / {
 	model = "ST-Ericsson HREF (v60+) and TVK1281618 UIB";
 	compatible = "st-ericsson,hrefv60+", "st-ericsson,u8500";
+
+	/* This stablilizes the serial port enumeration */
+	aliases {
+		serial0 = &ux500_serial0;
+		serial1 = &ux500_serial1;
+		serial2 = &ux500_serial2;
+	};
 };
diff --git a/arch/arm/boot/dts/ste-hrefv60plus.dtsi b/arch/arm/boot/dts/ste-hrefv60plus.dtsi
index a4bc9e77d640..810cda743b6d 100644
--- a/arch/arm/boot/dts/ste-hrefv60plus.dtsi
+++ b/arch/arm/boot/dts/ste-hrefv60plus.dtsi
@@ -43,15 +43,26 @@
 			    <&vaudio_hf_hrefv60_mode>,
 			    <&gbf_hrefv60_mode>,
 			    <&hdtv_hrefv60_mode>,
-			    <&touch_hrefv60_mode>;
+			    <&touch_hrefv60_mode>,
+			    <&gpios_hrefv60_mode>;
 
 		sdi0 {
-			/* SD card detect GPIO pin, extend default state */
 			sdi0_default_mode: sdi0_default {
+				/* SD card detect GPIO pin, extend default state */
 				default_hrefv60_cfg1 {
 					pins = "GPIO95_E8";
 					ste,config = <&gpio_in_pu>;
 				};
+				/* VMMCI level-shifter enable */
+				default_hrefv60_cfg2 {
+					pins = "GPIO169_D22";
+					ste,config = <&gpio_out_lo>;
+				};
+				/* VMMCI level-shifter voltage select */
+				default_hrefv60_cfg3 {
+					pins = "GPIO5_AG6";
+					ste,config = <&gpio_out_hi>;
+				};
 			};
 		};
 		ipgpio {
@@ -213,6 +224,16 @@
 				};
 			};
 		};
+		gpios {
+			/* Dangling GPIO pins */
+			gpios_hrefv60_mode: gpios_hrefv60 {
+				default_cfg1 {
+					/* Normally UART1 RXD, now dangling */
+					pins = "GPIO4_AH6";
+					ste,config = <&in_pu>;
+				};
+			};
+		};
 	};
 	};
 };
diff --git a/arch/arm/boot/dts/ste-nomadik-nhk15.dts b/arch/arm/boot/dts/ste-nomadik-nhk15.dts
index 3d0b8755caee..3d25dba143a5 100644
--- a/arch/arm/boot/dts/ste-nomadik-nhk15.dts
+++ b/arch/arm/boot/dts/ste-nomadik-nhk15.dts
@@ -17,6 +17,7 @@
 	};
 
 	aliases {
+		serial1 = &uart1;
 		stmpe-i2c0 = &stmpe0;
 		stmpe-i2c1 = &stmpe1;
 	};
diff --git a/arch/arm/boot/dts/ste-nomadik-s8815.dts b/arch/arm/boot/dts/ste-nomadik-s8815.dts
index 85d3b95dfdba..3c140d05f796 100644
--- a/arch/arm/boot/dts/ste-nomadik-s8815.dts
+++ b/arch/arm/boot/dts/ste-nomadik-s8815.dts
@@ -15,6 +15,10 @@
 		bootargs = "root=/dev/ram0 console=ttyAMA1,115200n8 earlyprintk";
 	};
 
+	aliases {
+		serial1 = &uart1;
+	};
+
 	src@101e0000 {
 		/* These chrystal drivers are not used on this board */
 		disable-sxtalo;
diff --git a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
index 9a5f2ba139b7..ef794a33b4dc 100644
--- a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
+++ b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
@@ -757,6 +757,7 @@
 			clock-names = "uartclk", "apb_pclk";
 			pinctrl-names = "default";
 			pinctrl-0 = <&uart0_default_mux>;
+			status = "disabled";
 		};
 
 		uart1: uart@101fb000 {
diff --git a/arch/arm/boot/dts/ste-snowball.dts b/arch/arm/boot/dts/ste-snowball.dts
index 9edadc37719f..32a5ccb14e7e 100644
--- a/arch/arm/boot/dts/ste-snowball.dts
+++ b/arch/arm/boot/dts/ste-snowball.dts
@@ -18,6 +18,13 @@
 	model = "Calao Systems Snowball platform with device tree";
 	compatible = "calaosystems,snowball-a9500", "st-ericsson,u9500";
 
+	/* This stablilizes the serial port enumeration */
+	aliases {
+		serial0 = &ux500_serial0;
+		serial1 = &ux500_serial1;
+		serial2 = &ux500_serial2;
+	};
+
 	memory {
 		reg = <0x00000000 0x20000000>;
 	};
@@ -223,11 +230,11 @@
 			status = "okay";
 		};
 
+		/* This UART is unused and thus left disabled */
 		uart@80121000 {
 			pinctrl-names = "default", "sleep";
 			pinctrl-0 = <&uart1_default_mode>;
 			pinctrl-1 = <&uart1_sleep_mode>;
-			status = "okay";
 		};
 
 		uart@80007000 {
@@ -452,7 +459,21 @@
 				pins = "GPIO21_AB3"; /* DAT31DIR */
 				ste,config = <&out_hi>;
 			};
-
+			/* SD card detect GPIO pin, extend default state */
+			snowball_cfg2 {
+				pins = "GPIO218_AH11";
+				ste,config = <&gpio_in_pu>;
+			};
+			/* VMMCI level-shifter enable */
+			snowball_cfg3 {
+				pins = "GPIO217_AH12";
+				ste,config = <&gpio_out_lo>;
+			};
+			/* VMMCI level-shifter voltage select */
+			snowball_cfg4 {
+				pins = "GPIO228_AJ6";
+				ste,config = <&gpio_out_hi>;
+			};
 		};
 	};
 	ssp0 {
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index 83c50193626c..30b3bc1666d2 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -13,6 +13,7 @@ generic-y += kdebug.h
 generic-y += local.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
 generic-y += msgbuf.h
 generic-y += param.h
 generic-y += parport.h
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 6f225acc07c5..b7f6fb462ea0 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -286,7 +286,7 @@ extern phys_addr_t (*arch_virt_to_idmap)(unsigned long x);
  */
 static inline phys_addr_t __virt_to_idmap(unsigned long x)
 {
-	if (arch_virt_to_idmap)
+	if (IS_ENABLED(CONFIG_MMU) && arch_virt_to_idmap)
 		return arch_virt_to_idmap(x);
 	else
 		return __virt_to_phys(x);
diff --git a/arch/arm/include/asm/mm-arch-hooks.h b/arch/arm/include/asm/mm-arch-hooks.h
deleted file mode 100644
index 7056660c7cc4..000000000000
--- a/arch/arm/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Architecture specific mm hooks
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_ARM_MM_ARCH_HOOKS_H
-#define _ASM_ARM_MM_ARCH_HOOKS_H
-
-#endif /* _ASM_ARM_MM_ARCH_HOOKS_H */
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 92828a1dec80..b48dd4f37f80 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -61,6 +61,7 @@ work_pending:
 	movlt	scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE)
 	ldmia	sp, {r0 - r6}			@ have to reload r0 - r6
 	b	local_restart			@ ... and off we go
+ENDPROC(ret_fast_syscall)
 
 /*
  * "slow" syscall return path. "why" tells us if this was a real syscall.
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index bd755d97e459..29e2991465cb 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -399,6 +399,9 @@ ENTRY(secondary_startup)
 	sub	lr, r4, r5			@ mmu has been enabled
 	add	r3, r7, lr
 	ldrd	r4, [r3, #0]			@ get secondary_data.pgdir
+ARM_BE8(eor	r4, r4, r5)			@ Swap r5 and r4 in BE:
+ARM_BE8(eor	r5, r4, r5)			@ it can be done in 3 steps
+ARM_BE8(eor	r4, r4, r5)			@ without using a temp reg.
 	ldr	r8, [r3, #8]			@ get secondary_data.swapper_pg_dir
 	badr	lr, __enable_mmu		@ return address
 	mov	r13, r12			@ __secondary_switched address
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 357f57ea83f4..54272e0be713 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -818,12 +818,13 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu)
 		if (arch_find_n_match_cpu_physical_id(dn, cpu, NULL))
 			break;
 
-		of_node_put(dn);
 		if (cpu >= nr_cpu_ids) {
 			pr_warn("Failed to find logical CPU for %s\n",
 				dn->name);
+			of_node_put(dn);
 			break;
 		}
+		of_node_put(dn);
 
 		irqs[i] = cpu;
 		cpumask_set_cpu(cpu, &pmu->supported_cpus);
diff --git a/arch/arm/kernel/reboot.c b/arch/arm/kernel/reboot.c
index 1a4d232796be..38269358fd25 100644
--- a/arch/arm/kernel/reboot.c
+++ b/arch/arm/kernel/reboot.c
@@ -50,7 +50,7 @@ static void __soft_restart(void *addr)
 	flush_cache_all();
 
 	/* Switch to the identity mapping. */
-	phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
+	phys_reset = (phys_reset_t)(unsigned long)virt_to_idmap(cpu_reset);
 	phys_reset((unsigned long)addr);
 
 	/* Should never get here. */
diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c
index efe17dd9b921..54a5aeab988d 100644
--- a/arch/arm/kernel/vdso.c
+++ b/arch/arm/kernel/vdso.c
@@ -296,7 +296,6 @@ static bool tk_is_cntvct(const struct timekeeper *tk)
  */
 void update_vsyscall(struct timekeeper *tk)
 {
-	struct timespec xtime_coarse;
 	struct timespec64 *wtm = &tk->wall_to_monotonic;
 
 	if (!cntvct_ok) {
@@ -308,10 +307,10 @@ void update_vsyscall(struct timekeeper *tk)
 
 	vdso_write_begin(vdso_data);
 
-	xtime_coarse = __current_kernel_time();
 	vdso_data->tk_is_cntvct		= tk_is_cntvct(tk);
-	vdso_data->xtime_coarse_sec	= xtime_coarse.tv_sec;
-	vdso_data->xtime_coarse_nsec	= xtime_coarse.tv_nsec;
+	vdso_data->xtime_coarse_sec	= tk->xtime_sec;
+	vdso_data->xtime_coarse_nsec	= (u32)(tk->tkr_mono.xtime_nsec >>
+						tk->tkr_mono.shift);
 	vdso_data->wtm_clock_sec	= wtm->tv_sec;
 	vdso_data->wtm_clock_nsec	= wtm->tv_nsec;
 
diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
index 3e58d710013c..4b39af2dfda9 100644
--- a/arch/arm/lib/uaccess_with_memcpy.c
+++ b/arch/arm/lib/uaccess_with_memcpy.c
@@ -96,7 +96,7 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
 	}
 
 	/* the mmap semaphore is taken only if not in an atomic context */
-	atomic = in_atomic();
+	atomic = faulthandler_disabled();
 
 	if (!atomic)
 		down_read(&current->mm->mmap_sem);
diff --git a/arch/arm/mach-exynos/pm_domains.c b/arch/arm/mach-exynos/pm_domains.c
index 6001f1c9d136..4a87e86dec45 100644
--- a/arch/arm/mach-exynos/pm_domains.c
+++ b/arch/arm/mach-exynos/pm_domains.c
@@ -146,9 +146,8 @@ static __init int exynos4_pm_init_power_domain(void)
 		pd->base = of_iomap(np, 0);
 		if (!pd->base) {
 			pr_warn("%s: failed to map memory\n", __func__);
-			kfree(pd->pd.name);
+			kfree_const(pd->pd.name);
 			kfree(pd);
-			of_node_put(np);
 			continue;
 		}
 
diff --git a/arch/arm/mach-imx/gpc.c b/arch/arm/mach-imx/gpc.c
index 80bad29d609a..8c4467fad837 100644
--- a/arch/arm/mach-imx/gpc.c
+++ b/arch/arm/mach-imx/gpc.c
@@ -291,8 +291,6 @@ void __init imx_gpc_check_dt(void)
 	}
 }
 
-#ifdef CONFIG_PM_GENERIC_DOMAINS
-
 static void _imx6q_pm_pu_power_off(struct generic_pm_domain *genpd)
 {
 	int iso, iso2sw;
@@ -399,7 +397,6 @@ static struct genpd_onecell_data imx_gpc_onecell_data = {
 static int imx_gpc_genpd_init(struct device *dev, struct regulator *pu_reg)
 {
 	struct clk *clk;
-	bool is_off;
 	int i;
 
 	imx6q_pu_domain.reg = pu_reg;
@@ -416,18 +413,13 @@ static int imx_gpc_genpd_init(struct device *dev, struct regulator *pu_reg)
 	}
 	imx6q_pu_domain.num_clks = i;
 
-	is_off = IS_ENABLED(CONFIG_PM);
-	if (is_off) {
-		_imx6q_pm_pu_power_off(&imx6q_pu_domain.base);
-	} else {
-		/*
-		 * Enable power if compiled without CONFIG_PM in case the
-		 * bootloader disabled it.
-		 */
-		imx6q_pm_pu_power_on(&imx6q_pu_domain.base);
-	}
+	/* Enable power always in case bootloader disabled it. */
+	imx6q_pm_pu_power_on(&imx6q_pu_domain.base);
+
+	if (!IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS))
+		return 0;
 
-	pm_genpd_init(&imx6q_pu_domain.base, NULL, is_off);
+	pm_genpd_init(&imx6q_pu_domain.base, NULL, false);
 	return of_genpd_add_provider_onecell(dev->of_node,
 					     &imx_gpc_onecell_data);
 
@@ -437,13 +429,6 @@ clk_err:
 	return -EINVAL;
 }
 
-#else
-static inline int imx_gpc_genpd_init(struct device *dev, struct regulator *reg)
-{
-	return 0;
-}
-#endif /* CONFIG_PM_GENERIC_DOMAINS */
-
 static int imx_gpc_probe(struct platform_device *pdev)
 {
 	struct regulator *pu_reg;
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index ecc04ff13e95..4a023e8d1bdb 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -60,6 +60,7 @@ config SOC_AM43XX
 	select ARM_GIC
 	select MACH_OMAP_GENERIC
 	select MIGHT_HAVE_CACHE_L2X0
+	select HAVE_ARM_SCU
 
 config SOC_DRA7XX
 	bool "TI DRA7XX"
diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
index 8e52621b5a6b..e1d2e991d17a 100644
--- a/arch/arm/mach-omap2/omap-wakeupgen.c
+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
@@ -392,6 +392,7 @@ static struct irq_chip wakeupgen_chip = {
 	.irq_mask		= wakeupgen_mask,
 	.irq_unmask		= wakeupgen_unmask,
 	.irq_retrigger		= irq_chip_retrigger_hierarchy,
+	.irq_set_type		= irq_chip_set_type_parent,
 	.flags			= IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
 #ifdef CONFIG_SMP
 	.irq_set_affinity	= irq_chip_set_affinity_parent,
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index d78c12e7cb5e..486cc4ded190 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -2373,6 +2373,9 @@ static int of_dev_hwmod_lookup(struct device_node *np,
  * registers. This address is needed early so the OCP registers that
  * are part of the device's address space can be ioremapped properly.
  *
+ * If SYSC access is not needed, the registers will not be remapped
+ * and non-availability of MPU access is not treated as an error.
+ *
  * Returns 0 on success, -EINVAL if an invalid hwmod is passed, and
  * -ENXIO on absent or invalid register target address space.
  */
@@ -2387,6 +2390,11 @@ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data,
 
 	_save_mpu_port_index(oh);
 
+	/* if we don't need sysc access we don't need to ioremap */
+	if (!oh->class->sysc)
+		return 0;
+
+	/* we can't continue without MPU PORT if we need sysc access */
 	if (oh->_int_flags & _HWMOD_NO_MPU_PORT)
 		return -ENXIO;
 
@@ -2396,8 +2404,10 @@ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data,
 			oh->name);
 
 		/* Extract the IO space from device tree blob */
-		if (!np)
+		if (!np) {
+			pr_err("omap_hwmod: %s: no dt node\n", oh->name);
 			return -ENXIO;
+		}
 
 		va_start = of_iomap(np, index + oh->mpu_rt_idx);
 	} else {
@@ -2456,13 +2466,11 @@ static int __init _init(struct omap_hwmod *oh, void *data)
 			oh->name, np->name);
 	}
 
-	if (oh->class->sysc) {
-		r = _init_mpu_rt_base(oh, NULL, index, np);
-		if (r < 0) {
-			WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n",
-			     oh->name);
-			return 0;
-		}
+	r = _init_mpu_rt_base(oh, NULL, index, np);
+	if (r < 0) {
+		WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n",
+		     oh->name);
+		return 0;
 	}
 
 	r = _init_clocks(oh, NULL);
diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
index 2606c6608bd8..562247bced49 100644
--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
@@ -827,8 +827,7 @@ static struct omap_hwmod_class_sysconfig dra7xx_gpmc_sysc = {
 	.syss_offs	= 0x0014,
 	.sysc_flags	= (SYSC_HAS_AUTOIDLE | SYSC_HAS_SIDLEMODE |
 			   SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
-	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
-			   SIDLE_SMART_WKUP),
+	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
 	.sysc_fields	= &omap_hwmod_sysc_type1,
 };
 
@@ -844,7 +843,7 @@ static struct omap_hwmod dra7xx_gpmc_hwmod = {
 	.class		= &dra7xx_gpmc_hwmod_class,
 	.clkdm_name	= "l3main1_clkdm",
 	/* Skip reset for CONFIG_OMAP_GPMC_DEBUG for bootloader timings */
-	.flags		= HWMOD_SWSUP_SIDLE | DEBUG_OMAP_GPMC_HWMOD_FLAGS,
+	.flags		= DEBUG_OMAP_GPMC_HWMOD_FLAGS,
 	.main_clk	= "l3_iclk_div",
 	.prcm = {
 		.omap4 = {
diff --git a/arch/arm/mach-pxa/capc7117.c b/arch/arm/mach-pxa/capc7117.c
index c092730749b9..bf366b39fa61 100644
--- a/arch/arm/mach-pxa/capc7117.c
+++ b/arch/arm/mach-pxa/capc7117.c
@@ -24,6 +24,7 @@
 #include <linux/ata_platform.h>
 #include <linux/serial_8250.h>
 #include <linux/gpio.h>
+#include <linux/regulator/machine.h>
 
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
@@ -144,6 +145,8 @@ static void __init capc7117_init(void)
 
 	capc7117_uarts_init();
 	capc7117_ide_init();
+
+	regulator_has_full_constraints();
 }
 
 MACHINE_START(CAPC7117,
diff --git a/arch/arm/mach-pxa/cm-x2xx.c b/arch/arm/mach-pxa/cm-x2xx.c
index bb99f59a36d8..a17a91eb8e9a 100644
--- a/arch/arm/mach-pxa/cm-x2xx.c
+++ b/arch/arm/mach-pxa/cm-x2xx.c
@@ -13,6 +13,7 @@
 #include <linux/syscore_ops.h>
 #include <linux/irq.h>
 #include <linux/gpio.h>
+#include <linux/regulator/machine.h>
 
 #include <linux/dm9000.h>
 #include <linux/leds.h>
@@ -466,6 +467,8 @@ static void __init cmx2xx_init(void)
 	cmx2xx_init_ac97();
 	cmx2xx_init_touchscreen();
 	cmx2xx_init_leds();
+
+	regulator_has_full_constraints();
 }
 
 static void __init cmx2xx_init_irq(void)
diff --git a/arch/arm/mach-pxa/cm-x300.c b/arch/arm/mach-pxa/cm-x300.c
index 4d3588d26c2a..5851f4c254c1 100644
--- a/arch/arm/mach-pxa/cm-x300.c
+++ b/arch/arm/mach-pxa/cm-x300.c
@@ -835,6 +835,8 @@ static void __init cm_x300_init(void)
 	cm_x300_init_ac97();
 	cm_x300_init_wi2wi();
 	cm_x300_init_bl();
+
+	regulator_has_full_constraints();
 }
 
 static void __init cm_x300_fixup(struct tag *tags, char **cmdline)
diff --git a/arch/arm/mach-pxa/colibri-pxa270.c b/arch/arm/mach-pxa/colibri-pxa270.c
index 5f9d9303b346..3503826333c7 100644
--- a/arch/arm/mach-pxa/colibri-pxa270.c
+++ b/arch/arm/mach-pxa/colibri-pxa270.c
@@ -18,6 +18,7 @@
 #include <linux/mtd/partitions.h>
 #include <linux/mtd/physmap.h>
 #include <linux/platform_device.h>
+#include <linux/regulator/machine.h>
 #include <linux/ucb1400.h>
 
 #include <asm/mach/arch.h>
@@ -294,6 +295,8 @@ static void __init colibri_pxa270_init(void)
 		printk(KERN_ERR "Illegal colibri_pxa270_baseboard type %d\n",
 		       colibri_pxa270_baseboard);
 	}
+
+	regulator_has_full_constraints();
 }
 
 /* The "Income s.r.o. SH-Dmaster PXA270 SBC" board can be booted either
diff --git a/arch/arm/mach-pxa/em-x270.c b/arch/arm/mach-pxa/em-x270.c
index 51531ecffca8..9d7072b04045 100644
--- a/arch/arm/mach-pxa/em-x270.c
+++ b/arch/arm/mach-pxa/em-x270.c
@@ -1306,6 +1306,8 @@ static void __init em_x270_init(void)
 	em_x270_init_i2c();
 	em_x270_init_camera();
 	em_x270_userspace_consumers_init();
+
+	regulator_has_full_constraints();
 }
 
 MACHINE_START(EM_X270, "Compulab EM-X270")
diff --git a/arch/arm/mach-pxa/icontrol.c b/arch/arm/mach-pxa/icontrol.c
index c98511c5abd1..9b0eb0252af6 100644
--- a/arch/arm/mach-pxa/icontrol.c
+++ b/arch/arm/mach-pxa/icontrol.c
@@ -26,6 +26,7 @@
26#include <linux/spi/spi.h> 26#include <linux/spi/spi.h>
27#include <linux/spi/pxa2xx_spi.h> 27#include <linux/spi/pxa2xx_spi.h>
28#include <linux/can/platform/mcp251x.h> 28#include <linux/can/platform/mcp251x.h>
29#include <linux/regulator/machine.h>
29 30
30#include "generic.h" 31#include "generic.h"
31 32
@@ -185,6 +186,8 @@ static void __init icontrol_init(void)
185 mxm_8x10_mmc_init(); 186 mxm_8x10_mmc_init();
186 187
187 icontrol_can_init(); 188 icontrol_can_init();
189
190 regulator_has_full_constraints();
188} 191}
189 192
190MACHINE_START(ICONTROL, "iControl/SafeTcam boards using Embedian MXM-8x10 CoM") 193MACHINE_START(ICONTROL, "iControl/SafeTcam boards using Embedian MXM-8x10 CoM")
diff --git a/arch/arm/mach-pxa/trizeps4.c b/arch/arm/mach-pxa/trizeps4.c
index 872dcb20e757..066e3a250ee0 100644
--- a/arch/arm/mach-pxa/trizeps4.c
+++ b/arch/arm/mach-pxa/trizeps4.c
@@ -26,6 +26,7 @@
26#include <linux/dm9000.h> 26#include <linux/dm9000.h>
27#include <linux/mtd/physmap.h> 27#include <linux/mtd/physmap.h>
28#include <linux/mtd/partitions.h> 28#include <linux/mtd/partitions.h>
29#include <linux/regulator/machine.h>
29#include <linux/i2c/pxa-i2c.h> 30#include <linux/i2c/pxa-i2c.h>
30 31
31#include <asm/types.h> 32#include <asm/types.h>
@@ -534,6 +535,8 @@ static void __init trizeps4_init(void)
534 535
535 BCR_writew(trizeps_conxs_bcr); 536 BCR_writew(trizeps_conxs_bcr);
536 board_backlight_power(1); 537 board_backlight_power(1);
538
539 regulator_has_full_constraints();
537} 540}
538 541
539static void __init trizeps4_map_io(void) 542static void __init trizeps4_map_io(void)
diff --git a/arch/arm/mach-pxa/vpac270.c b/arch/arm/mach-pxa/vpac270.c
index aa89488f961e..54122a983ae3 100644
--- a/arch/arm/mach-pxa/vpac270.c
+++ b/arch/arm/mach-pxa/vpac270.c
@@ -24,6 +24,7 @@
24#include <linux/dm9000.h> 24#include <linux/dm9000.h>
25#include <linux/ucb1400.h> 25#include <linux/ucb1400.h>
26#include <linux/ata_platform.h> 26#include <linux/ata_platform.h>
27#include <linux/regulator/machine.h>
27#include <linux/regulator/max1586.h> 28#include <linux/regulator/max1586.h>
28#include <linux/i2c/pxa-i2c.h> 29#include <linux/i2c/pxa-i2c.h>
29 30
@@ -711,6 +712,8 @@ static void __init vpac270_init(void)
711 vpac270_ts_init(); 712 vpac270_ts_init();
712 vpac270_rtc_init(); 713 vpac270_rtc_init();
713 vpac270_ide_init(); 714 vpac270_ide_init();
715
716 regulator_has_full_constraints();
714} 717}
715 718
716MACHINE_START(VPAC270, "Voipac PXA270") 719MACHINE_START(VPAC270, "Voipac PXA270")
diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c
index ac2ae5c71ab4..6158566fa0f7 100644
--- a/arch/arm/mach-pxa/zeus.c
+++ b/arch/arm/mach-pxa/zeus.c
@@ -868,6 +868,8 @@ static void __init zeus_init(void)
868 i2c_register_board_info(0, ARRAY_AND_SIZE(zeus_i2c_devices)); 868 i2c_register_board_info(0, ARRAY_AND_SIZE(zeus_i2c_devices));
869 pxa2xx_set_spi_info(3, &pxa2xx_spi_ssp3_master_info); 869 pxa2xx_set_spi_info(3, &pxa2xx_spi_ssp3_master_info);
870 spi_register_board_info(zeus_spi_board_info, ARRAY_SIZE(zeus_spi_board_info)); 870 spi_register_board_info(zeus_spi_board_info, ARRAY_SIZE(zeus_spi_board_info));
871
872 regulator_has_full_constraints();
871} 873}
872 874
873static struct map_desc zeus_io_desc[] __initdata = { 875static struct map_desc zeus_io_desc[] __initdata = {
diff --git a/arch/arm/mach-spear/generic.h b/arch/arm/mach-spear/generic.h
index a99d90a4d09c..06640914d9a0 100644
--- a/arch/arm/mach-spear/generic.h
+++ b/arch/arm/mach-spear/generic.h
@@ -3,7 +3,7 @@
3 * 3 *
4 * Copyright (C) 2009-2012 ST Microelectronics 4 * Copyright (C) 2009-2012 ST Microelectronics
5 * Rajeev Kumar <rajeev-dlh.kumar@st.com> 5 * Rajeev Kumar <rajeev-dlh.kumar@st.com>
6 * Viresh Kumar <viresh.linux@gmail.com> 6 * Viresh Kumar <vireshk@kernel.org>
7 * 7 *
8 * This file is licensed under the terms of the GNU General Public 8 * This file is licensed under the terms of the GNU General Public
9 * License version 2. This program is licensed "as is" without any 9 * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear/include/mach/irqs.h b/arch/arm/mach-spear/include/mach/irqs.h
index 92da0a8c6bce..7058720c5278 100644
--- a/arch/arm/mach-spear/include/mach/irqs.h
+++ b/arch/arm/mach-spear/include/mach/irqs.h
@@ -3,7 +3,7 @@
3 * 3 *
4 * Copyright (C) 2009-2012 ST Microelectronics 4 * Copyright (C) 2009-2012 ST Microelectronics
5 * Rajeev Kumar <rajeev-dlh.kumar@st.com> 5 * Rajeev Kumar <rajeev-dlh.kumar@st.com>
6 * Viresh Kumar <viresh.linux@gmail.com> 6 * Viresh Kumar <vireshk@kernel.org>
7 * 7 *
8 * This file is licensed under the terms of the GNU General Public 8 * This file is licensed under the terms of the GNU General Public
9 * License version 2. This program is licensed "as is" without any 9 * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear/include/mach/misc_regs.h b/arch/arm/mach-spear/include/mach/misc_regs.h
index 935639ce59ba..cfaf7c665b58 100644
--- a/arch/arm/mach-spear/include/mach/misc_regs.h
+++ b/arch/arm/mach-spear/include/mach/misc_regs.h
@@ -4,7 +4,7 @@
4 * Miscellaneous registers definitions for SPEAr3xx machine family 4 * Miscellaneous registers definitions for SPEAr3xx machine family
5 * 5 *
6 * Copyright (C) 2009 ST Microelectronics 6 * Copyright (C) 2009 ST Microelectronics
7 * Viresh Kumar <viresh.linux@gmail.com> 7 * Viresh Kumar <vireshk@kernel.org>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear/include/mach/spear.h b/arch/arm/mach-spear/include/mach/spear.h
index f2d6a0176575..5ed841ccf8a3 100644
--- a/arch/arm/mach-spear/include/mach/spear.h
+++ b/arch/arm/mach-spear/include/mach/spear.h
@@ -3,7 +3,7 @@
3 * 3 *
4 * Copyright (C) 2009,2012 ST Microelectronics 4 * Copyright (C) 2009,2012 ST Microelectronics
5 * Rajeev Kumar<rajeev-dlh.kumar@st.com> 5 * Rajeev Kumar<rajeev-dlh.kumar@st.com>
6 * Viresh Kumar <viresh.linux@gmail.com> 6 * Viresh Kumar <vireshk@kernel.org>
7 * 7 *
8 * This file is licensed under the terms of the GNU General Public 8 * This file is licensed under the terms of the GNU General Public
9 * License version 2. This program is licensed "as is" without any 9 * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear/include/mach/uncompress.h b/arch/arm/mach-spear/include/mach/uncompress.h
index 51b2dc93e4da..8439b9c12edb 100644
--- a/arch/arm/mach-spear/include/mach/uncompress.h
+++ b/arch/arm/mach-spear/include/mach/uncompress.h
@@ -4,7 +4,7 @@
4 * Serial port stubs for kernel decompress status messages 4 * Serial port stubs for kernel decompress status messages
5 * 5 *
6 * Copyright (C) 2009 ST Microelectronics 6 * Copyright (C) 2009 ST Microelectronics
7 * Viresh Kumar <viresh.linux@gmail.com> 7 * Viresh Kumar <vireshk@kernel.org>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear/pl080.c b/arch/arm/mach-spear/pl080.c
index cfa1199d0f4a..b4529f3e0ee9 100644
--- a/arch/arm/mach-spear/pl080.c
+++ b/arch/arm/mach-spear/pl080.c
@@ -4,7 +4,7 @@
4 * DMAC pl080 definitions for SPEAr platform 4 * DMAC pl080 definitions for SPEAr platform
5 * 5 *
6 * Copyright (C) 2012 ST Microelectronics 6 * Copyright (C) 2012 ST Microelectronics
7 * Viresh Kumar <viresh.linux@gmail.com> 7 * Viresh Kumar <vireshk@kernel.org>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear/pl080.h b/arch/arm/mach-spear/pl080.h
index eb6590ded40d..608dec6725ae 100644
--- a/arch/arm/mach-spear/pl080.h
+++ b/arch/arm/mach-spear/pl080.h
@@ -4,7 +4,7 @@
4 * DMAC pl080 definitions for SPEAr platform 4 * DMAC pl080 definitions for SPEAr platform
5 * 5 *
6 * Copyright (C) 2012 ST Microelectronics 6 * Copyright (C) 2012 ST Microelectronics
7 * Viresh Kumar <viresh.linux@gmail.com> 7 * Viresh Kumar <vireshk@kernel.org>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear/restart.c b/arch/arm/mach-spear/restart.c
index ce5e098c4888..b4342155a783 100644
--- a/arch/arm/mach-spear/restart.c
+++ b/arch/arm/mach-spear/restart.c
@@ -4,7 +4,7 @@
4 * SPEAr platform specific restart functions 4 * SPEAr platform specific restart functions
5 * 5 *
6 * Copyright (C) 2009 ST Microelectronics 6 * Copyright (C) 2009 ST Microelectronics
7 * Viresh Kumar <viresh.linux@gmail.com> 7 * Viresh Kumar <vireshk@kernel.org>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear/spear1310.c b/arch/arm/mach-spear/spear1310.c
index d9ce4d8000f0..cd5d375d91f0 100644
--- a/arch/arm/mach-spear/spear1310.c
+++ b/arch/arm/mach-spear/spear1310.c
@@ -4,7 +4,7 @@
4 * SPEAr1310 machine source file 4 * SPEAr1310 machine source file
5 * 5 *
6 * Copyright (C) 2012 ST Microelectronics 6 * Copyright (C) 2012 ST Microelectronics
7 * Viresh Kumar <viresh.linux@gmail.com> 7 * Viresh Kumar <vireshk@kernel.org>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear/spear1340.c b/arch/arm/mach-spear/spear1340.c
index 3f3c0f124bd3..94594d5a446c 100644
--- a/arch/arm/mach-spear/spear1340.c
+++ b/arch/arm/mach-spear/spear1340.c
@@ -4,7 +4,7 @@
4 * SPEAr1340 machine source file 4 * SPEAr1340 machine source file
5 * 5 *
6 * Copyright (C) 2012 ST Microelectronics 6 * Copyright (C) 2012 ST Microelectronics
7 * Viresh Kumar <viresh.linux@gmail.com> 7 * Viresh Kumar <vireshk@kernel.org>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear/spear13xx.c b/arch/arm/mach-spear/spear13xx.c
index 2e463a93468d..b7afce6795f4 100644
--- a/arch/arm/mach-spear/spear13xx.c
+++ b/arch/arm/mach-spear/spear13xx.c
@@ -4,7 +4,7 @@
4 * SPEAr13XX machines common source file 4 * SPEAr13XX machines common source file
5 * 5 *
6 * Copyright (C) 2012 ST Microelectronics 6 * Copyright (C) 2012 ST Microelectronics
7 * Viresh Kumar <viresh.linux@gmail.com> 7 * Viresh Kumar <vireshk@kernel.org>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear/spear300.c b/arch/arm/mach-spear/spear300.c
index b52e48f342f4..5b32edda2276 100644
--- a/arch/arm/mach-spear/spear300.c
+++ b/arch/arm/mach-spear/spear300.c
@@ -4,7 +4,7 @@
4 * SPEAr300 machine source file 4 * SPEAr300 machine source file
5 * 5 *
6 * Copyright (C) 2009-2012 ST Microelectronics 6 * Copyright (C) 2009-2012 ST Microelectronics
7 * Viresh Kumar <viresh.linux@gmail.com> 7 * Viresh Kumar <vireshk@kernel.org>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear/spear310.c b/arch/arm/mach-spear/spear310.c
index ed2029db391f..86a44ac7ff67 100644
--- a/arch/arm/mach-spear/spear310.c
+++ b/arch/arm/mach-spear/spear310.c
@@ -4,7 +4,7 @@
4 * SPEAr310 machine source file 4 * SPEAr310 machine source file
5 * 5 *
6 * Copyright (C) 2009-2012 ST Microelectronics 6 * Copyright (C) 2009-2012 ST Microelectronics
7 * Viresh Kumar <viresh.linux@gmail.com> 7 * Viresh Kumar <vireshk@kernel.org>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear/spear320.c b/arch/arm/mach-spear/spear320.c
index bf634b32a930..d45d751926c5 100644
--- a/arch/arm/mach-spear/spear320.c
+++ b/arch/arm/mach-spear/spear320.c
@@ -4,7 +4,7 @@
4 * SPEAr320 machine source file 4 * SPEAr320 machine source file
5 * 5 *
6 * Copyright (C) 2009-2012 ST Microelectronics 6 * Copyright (C) 2009-2012 ST Microelectronics
7 * Viresh Kumar <viresh.linux@gmail.com> 7 * Viresh Kumar <vireshk@kernel.org>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear/spear3xx.c b/arch/arm/mach-spear/spear3xx.c
index bf3b1fd8cb23..23394ac76cf2 100644
--- a/arch/arm/mach-spear/spear3xx.c
+++ b/arch/arm/mach-spear/spear3xx.c
@@ -4,7 +4,7 @@
4 * SPEAr3XX machines common source file 4 * SPEAr3XX machines common source file
5 * 5 *
6 * Copyright (C) 2009-2012 ST Microelectronics 6 * Copyright (C) 2009-2012 ST Microelectronics
7 * Viresh Kumar <viresh.linux@gmail.com> 7 * Viresh Kumar <vireshk@kernel.org>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 1ced8a0f7a52..cba12f34ff77 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1971,7 +1971,7 @@ static int extend_iommu_mapping(struct dma_iommu_mapping *mapping)
1971{ 1971{
1972 int next_bitmap; 1972 int next_bitmap;
1973 1973
1974 if (mapping->nr_bitmaps > mapping->extensions) 1974 if (mapping->nr_bitmaps >= mapping->extensions)
1975 return -EINVAL; 1975 return -EINVAL;
1976 1976
1977 next_bitmap = mapping->nr_bitmaps; 1977 next_bitmap = mapping->nr_bitmaps;
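The one-character change above fixes an off-by-one bound: nr_bitmaps counts the bitmap extensions already allocated, so once it equals extensions every valid index is in use and a further extension must be refused. A stripped-down sketch of the corrected check (the struct and function here are illustrative, not the kernel's dma_iommu_mapping):

    #include <linux/errno.h>

    struct mapping {
        unsigned int nr_bitmaps;    /* extensions allocated so far */
        unsigned int extensions;    /* hard upper limit */
    };

    static int extend_mapping(struct mapping *m)
    {
        /* valid indices run 0..extensions-1, so == extensions is already full */
        if (m->nr_bitmaps >= m->extensions)
            return -EINVAL;

        /* ...allocate bitmap number m->nr_bitmaps here... */
        m->nr_bitmaps++;
        return 0;
    }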
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 0716bbe19872..de2b246fed38 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -274,7 +274,10 @@ __v7_ca15mp_setup:
 __v7_b15mp_setup:
 __v7_ca17mp_setup:
 	mov	r10, #0
-1:
+1:	adr	r12, __v7_setup_stack		@ the local stack
+	stmia	r12, {r0-r5, lr}		@ v7_invalidate_l1 touches r0-r6
+	bl	v7_invalidate_l1
+	ldmia	r12, {r0-r5, lr}
 #ifdef CONFIG_SMP
 	ALT_SMP(mrc	p15, 0, r0, c1, c0, 1)
 	ALT_UP(mov	r0, #(1 << 6))		@ fake it for UP
@@ -283,7 +286,7 @@ __v7_ca17mp_setup:
 	orreq	r0, r0, r10			@ Enable CPU-specific SMP bits
 	mcreq	p15, 0, r0, c1, c0, 1
 #endif
-	b	__v7_setup
+	b	__v7_setup_cont
 
 /*
  * Errata:
@@ -413,10 +416,11 @@ __v7_pj4b_setup:
 
 __v7_setup:
 	adr	r12, __v7_setup_stack		@ the local stack
-	stmia	r12, {r0-r5, r7, r9, r11, lr}
+	stmia	r12, {r0-r5, lr}		@ v7_invalidate_l1 touches r0-r6
 	bl	v7_invalidate_l1
-	ldmia	r12, {r0-r5, r7, r9, r11, lr}
+	ldmia	r12, {r0-r5, lr}
 
+__v7_setup_cont:
 	and	r0, r9, #0xff000000		@ ARM?
 	teq	r0, #0x41000000
 	bne	__errata_finish
@@ -480,7 +484,7 @@ ENDPROC(__v7_setup)
 
 	.align	2
 __v7_setup_stack:
-	.space	4 * 11				@ 11 registers
+	.space	4 * 7				@ 7 registers
 
 	__INITDATA
 
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index 4550d247e308..c011e2296cb1 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -74,32 +74,52 @@ struct jit_ctx {
74 74
75int bpf_jit_enable __read_mostly; 75int bpf_jit_enable __read_mostly;
76 76
77static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset) 77static inline int call_neg_helper(struct sk_buff *skb, int offset, void *ret,
78 unsigned int size)
79{
80 void *ptr = bpf_internal_load_pointer_neg_helper(skb, offset, size);
81
82 if (!ptr)
83 return -EFAULT;
84 memcpy(ret, ptr, size);
85 return 0;
86}
87
88static u64 jit_get_skb_b(struct sk_buff *skb, int offset)
78{ 89{
79 u8 ret; 90 u8 ret;
80 int err; 91 int err;
81 92
82 err = skb_copy_bits(skb, offset, &ret, 1); 93 if (offset < 0)
94 err = call_neg_helper(skb, offset, &ret, 1);
95 else
96 err = skb_copy_bits(skb, offset, &ret, 1);
83 97
84 return (u64)err << 32 | ret; 98 return (u64)err << 32 | ret;
85} 99}
86 100
87static u64 jit_get_skb_h(struct sk_buff *skb, unsigned offset) 101static u64 jit_get_skb_h(struct sk_buff *skb, int offset)
88{ 102{
89 u16 ret; 103 u16 ret;
90 int err; 104 int err;
91 105
92 err = skb_copy_bits(skb, offset, &ret, 2); 106 if (offset < 0)
107 err = call_neg_helper(skb, offset, &ret, 2);
108 else
109 err = skb_copy_bits(skb, offset, &ret, 2);
93 110
94 return (u64)err << 32 | ntohs(ret); 111 return (u64)err << 32 | ntohs(ret);
95} 112}
96 113
97static u64 jit_get_skb_w(struct sk_buff *skb, unsigned offset) 114static u64 jit_get_skb_w(struct sk_buff *skb, int offset)
98{ 115{
99 u32 ret; 116 u32 ret;
100 int err; 117 int err;
101 118
102 err = skb_copy_bits(skb, offset, &ret, 4); 119 if (offset < 0)
120 err = call_neg_helper(skb, offset, &ret, 4);
121 else
122 err = skb_copy_bits(skb, offset, &ret, 4);
103 123
104 return (u64)err << 32 | ntohl(ret); 124 return (u64)err << 32 | ntohl(ret);
105} 125}
@@ -536,9 +556,6 @@ static int build_body(struct jit_ctx *ctx)
536 case BPF_LD | BPF_B | BPF_ABS: 556 case BPF_LD | BPF_B | BPF_ABS:
537 load_order = 0; 557 load_order = 0;
538load: 558load:
539 /* the interpreter will deal with the negative K */
540 if ((int)k < 0)
541 return -ENOTSUPP;
542 emit_mov_i(r_off, k, ctx); 559 emit_mov_i(r_off, k, ctx);
543load_common: 560load_common:
544 ctx->seen |= SEEN_DATA | SEEN_CALL; 561 ctx->seen |= SEEN_DATA | SEEN_CALL;
@@ -547,12 +564,24 @@ load_common:
547 emit(ARM_SUB_I(r_scratch, r_skb_hl, 564 emit(ARM_SUB_I(r_scratch, r_skb_hl,
548 1 << load_order), ctx); 565 1 << load_order), ctx);
549 emit(ARM_CMP_R(r_scratch, r_off), ctx); 566 emit(ARM_CMP_R(r_scratch, r_off), ctx);
550 condt = ARM_COND_HS; 567 condt = ARM_COND_GE;
551 } else { 568 } else {
552 emit(ARM_CMP_R(r_skb_hl, r_off), ctx); 569 emit(ARM_CMP_R(r_skb_hl, r_off), ctx);
553 condt = ARM_COND_HI; 570 condt = ARM_COND_HI;
554 } 571 }
555 572
573 /*
574 * test for negative offset, only if we are
575 * currently scheduled to take the fast
576 * path. this will update the flags so that
577 * the slowpath instructions are ignored if the
578 * offset is negative.
579 *
580 * for load_order == 0 the HI condition will
581 * make loads at offset 0 take the slow path too.
582 */
583 _emit(condt, ARM_CMP_I(r_off, 0), ctx);
584
556 _emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data), 585 _emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data),
557 ctx); 586 ctx);
558 587
@@ -860,9 +889,11 @@ b_epilogue:
860 off = offsetof(struct sk_buff, vlan_tci); 889 off = offsetof(struct sk_buff, vlan_tci);
861 emit(ARM_LDRH_I(r_A, r_skb, off), ctx); 890 emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
862 if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) 891 if (code == (BPF_ANC | SKF_AD_VLAN_TAG))
863 OP_IMM3(ARM_AND, r_A, r_A, VLAN_VID_MASK, ctx); 892 OP_IMM3(ARM_AND, r_A, r_A, ~VLAN_TAG_PRESENT, ctx);
864 else 893 else {
865 OP_IMM3(ARM_AND, r_A, r_A, VLAN_TAG_PRESENT, ctx); 894 OP_IMM3(ARM_LSR, r_A, r_A, 12, ctx);
895 OP_IMM3(ARM_AND, r_A, r_A, 0x1, ctx);
896 }
866 break; 897 break;
867 case BPF_ANC | SKF_AD_QUEUE: 898 case BPF_ANC | SKF_AD_QUEUE:
868 ctx->seen |= SEEN_SKB; 899 ctx->seen |= SEEN_SKB;
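Two behavioural fixes are folded into this JIT update. First, loads with a negative constant offset (the SKF_LL_OFF/SKF_NET_OFF ancillary ranges) are no longer rejected with -ENOTSUPP; the helpers now take a signed offset and fall back to the same negative-offset accessor the interpreter uses, bpf_internal_load_pointer_neg_helper(). Second, SKF_AD_VLAN_TAG_PRESENT now yields strictly 0 or 1 by shifting the VLAN_TAG_PRESENT bit down instead of returning the raw masked bit. A hedged C sketch of the negative-offset fallback (load_byte is an illustrative helper, not a function in this file):

    #include <linux/errno.h>
    #include <linux/filter.h>
    #include <linux/skbuff.h>

    /* Illustrative single-byte load with the interpreter's negative-offset
     * fallback; the JIT's jit_get_skb_b/h/w helpers follow the same shape.
     */
    static int load_byte(const struct sk_buff *skb, int offset, u8 *val)
    {
        if (offset < 0) {
            void *p = bpf_internal_load_pointer_neg_helper(skb, offset, 1);

            if (!p)
                return -EFAULT;
            *val = *(u8 *)p;
            return 0;
        }

        return skb_copy_bits(skb, offset, val, 1);
    }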
diff --git a/arch/arm/vdso/Makefile b/arch/arm/vdso/Makefile
index 9d259d94e429..1160434eece0 100644
--- a/arch/arm/vdso/Makefile
+++ b/arch/arm/vdso/Makefile
@@ -14,7 +14,7 @@ VDSO_LDFLAGS += -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096
14VDSO_LDFLAGS += -nostdlib -shared 14VDSO_LDFLAGS += -nostdlib -shared
15VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) 15VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
16VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--build-id) 16VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--build-id)
17VDSO_LDFLAGS += $(call cc-option, -fuse-ld=bfd) 17VDSO_LDFLAGS += $(call cc-ldoption, -fuse-ld=bfd)
18 18
19obj-$(CONFIG_VDSO) += vdso.o 19obj-$(CONFIG_VDSO) += vdso.o
20extra-$(CONFIG_VDSO) += vdso.lds 20extra-$(CONFIG_VDSO) += vdso.lds
diff --git a/arch/arm64/boot/dts/apm/apm-storm.dtsi b/arch/arm64/boot/dts/apm/apm-storm.dtsi
index 0689c3fb56e3..58093edeea2e 100644
--- a/arch/arm64/boot/dts/apm/apm-storm.dtsi
+++ b/arch/arm64/boot/dts/apm/apm-storm.dtsi
@@ -823,7 +823,7 @@
823 device_type = "dma"; 823 device_type = "dma";
824 reg = <0x0 0x1f270000 0x0 0x10000>, 824 reg = <0x0 0x1f270000 0x0 0x10000>,
825 <0x0 0x1f200000 0x0 0x10000>, 825 <0x0 0x1f200000 0x0 0x10000>,
826 <0x0 0x1b008000 0x0 0x2000>, 826 <0x0 0x1b000000 0x0 0x400000>,
827 <0x0 0x1054a000 0x0 0x100>; 827 <0x0 0x1054a000 0x0 0x100>;
828 interrupts = <0x0 0x82 0x4>, 828 interrupts = <0x0 0x82 0x4>,
829 <0x0 0xb8 0x4>, 829 <0x0 0xb8 0x4>,
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index b112a39834d0..70fd9ffb58cf 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -25,6 +25,7 @@ generic-y += kvm_para.h
25generic-y += local.h 25generic-y += local.h
26generic-y += local64.h 26generic-y += local64.h
27generic-y += mcs_spinlock.h 27generic-y += mcs_spinlock.h
28generic-y += mm-arch-hooks.h
28generic-y += mman.h 29generic-y += mman.h
29generic-y += msgbuf.h 30generic-y += msgbuf.h
30generic-y += msi.h 31generic-y += msi.h
diff --git a/arch/arm64/include/asm/mm-arch-hooks.h b/arch/arm64/include/asm/mm-arch-hooks.h
deleted file mode 100644
index 562b655f5ba9..000000000000
--- a/arch/arm64/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * Architecture specific mm hooks
3 *
4 * Copyright (C) 2015, IBM Corporation
5 * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _ASM_ARM64_MM_ARCH_HOOKS_H
13#define _ASM_ARM64_MM_ARCH_HOOKS_H
14
15#endif /* _ASM_ARM64_MM_ARCH_HOOKS_H */
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index 9d4aa18f2a82..e8ca6eaedd02 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -122,12 +122,12 @@ static int __init uefi_init(void)
122 122
123 /* Show what we know for posterity */ 123 /* Show what we know for posterity */
124 c16 = early_memremap(efi_to_phys(efi.systab->fw_vendor), 124 c16 = early_memremap(efi_to_phys(efi.systab->fw_vendor),
125 sizeof(vendor)); 125 sizeof(vendor) * sizeof(efi_char16_t));
126 if (c16) { 126 if (c16) {
127 for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i) 127 for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i)
128 vendor[i] = c16[i]; 128 vendor[i] = c16[i];
129 vendor[i] = '\0'; 129 vendor[i] = '\0';
130 early_memunmap(c16, sizeof(vendor)); 130 early_memunmap(c16, sizeof(vendor) * sizeof(efi_char16_t));
131 } 131 }
132 132
133 pr_info("EFI v%u.%.02u by %s\n", 133 pr_info("EFI v%u.%.02u by %s\n",
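The fix above is a units bug: fw_vendor points at a UTF-16 string, so mapping only sizeof(vendor) bytes covers half as many characters as the copy loop expects. A compressed sketch of the corrected mapping that mirrors the file's own calls (the wrapper name and the pr_info line are illustrative additions):

    /* efi_char16_t is a 16-bit UTF-16 code unit, so map one of them per
     * output character, not one byte per character.
     */
    static void __init show_fw_vendor(phys_addr_t phys)
    {
        char vendor[100] = "unknown";
        efi_char16_t *c16;
        int i;

        c16 = early_memremap(phys, sizeof(vendor) * sizeof(efi_char16_t));
        if (!c16)
            return;

        for (i = 0; i < (int)sizeof(vendor) - 1 && c16[i]; i++)
            vendor[i] = c16[i];        /* narrow each code unit to ASCII */
        vendor[i] = '\0';

        early_memunmap(c16, sizeof(vendor) * sizeof(efi_char16_t));
        pr_info("EFI firmware vendor: %s\n", vendor);
    }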
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index f860bfda454a..e16351819fed 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -585,7 +585,8 @@ ENDPROC(el0_irq)
585 * 585 *
586 */ 586 */
587ENTRY(cpu_switch_to) 587ENTRY(cpu_switch_to)
588 add x8, x0, #THREAD_CPU_CONTEXT 588 mov x10, #THREAD_CPU_CONTEXT
589 add x8, x0, x10
589 mov x9, sp 590 mov x9, sp
590 stp x19, x20, [x8], #16 // store callee-saved registers 591 stp x19, x20, [x8], #16 // store callee-saved registers
591 stp x21, x22, [x8], #16 592 stp x21, x22, [x8], #16
@@ -594,7 +595,7 @@ ENTRY(cpu_switch_to)
594 stp x27, x28, [x8], #16 595 stp x27, x28, [x8], #16
595 stp x29, x9, [x8], #16 596 stp x29, x9, [x8], #16
596 str lr, [x8] 597 str lr, [x8]
597 add x8, x1, #THREAD_CPU_CONTEXT 598 add x8, x1, x10
598 ldp x19, x20, [x8], #16 // restore callee-saved registers 599 ldp x19, x20, [x8], #16 // restore callee-saved registers
599 ldp x21, x22, [x8], #16 600 ldp x21, x22, [x8], #16
600 ldp x23, x24, [x8], #16 601 ldp x23, x24, [x8], #16
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index 240b75c0e94f..463fa2e7e34c 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -61,7 +61,7 @@ void __init init_IRQ(void)
61static bool migrate_one_irq(struct irq_desc *desc) 61static bool migrate_one_irq(struct irq_desc *desc)
62{ 62{
63 struct irq_data *d = irq_desc_get_irq_data(desc); 63 struct irq_data *d = irq_desc_get_irq_data(desc);
64 const struct cpumask *affinity = d->affinity; 64 const struct cpumask *affinity = irq_data_get_affinity_mask(d);
65 struct irq_chip *c; 65 struct irq_chip *c;
66 bool ret = false; 66 bool ret = false;
67 67
@@ -81,7 +81,7 @@ static bool migrate_one_irq(struct irq_desc *desc)
81 if (!c->irq_set_affinity) 81 if (!c->irq_set_affinity)
82 pr_debug("IRQ%u: unable to set affinity\n", d->irq); 82 pr_debug("IRQ%u: unable to set affinity\n", d->irq);
83 else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret) 83 else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
84 cpumask_copy(d->affinity, affinity); 84 cpumask_copy(irq_data_get_affinity_mask(d), affinity);
85 85
86 return ret; 86 return ret;
87} 87}
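The two hunks above replace direct accesses to irq_data->affinity with the irq_data_get_affinity_mask() accessor, which keeps arch code working as the genirq core moves the affinity mask around internally. A short sketch of the accessor-based pattern (sync_affinity is a hypothetical helper, not part of this file):

    #include <linux/cpumask.h>
    #include <linux/irq.h>

    static void sync_affinity(struct irq_desc *desc, const struct cpumask *mask)
    {
        struct irq_data *d = irq_desc_get_irq_data(desc);
        struct irq_chip *c = irq_data_get_irq_chip(d);

        if (!c->irq_set_affinity) {
            pr_debug("IRQ%u: unable to set affinity\n", d->irq);
            return;
        }

        /* Only mirror the mask back once the chip has accepted it. */
        if (c->irq_set_affinity(d, mask, false) == IRQ_SET_MASK_OK)
            cpumask_copy(irq_data_get_affinity_mask(d), mask);
    }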
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index 1670f15ef69e..948f0ad2de23 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -168,7 +168,8 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
168 * Other callers might not initialize the si_lsb field, 168 * Other callers might not initialize the si_lsb field,
169 * so check explicitely for the right codes here. 169 * so check explicitely for the right codes here.
170 */ 170 */
171 if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO) 171 if (from->si_signo == SIGBUS &&
172 (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
172 err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb); 173 err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
173#endif 174#endif
174 break; 175 break;
@@ -201,8 +202,6 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
201 202
202int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) 203int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
203{ 204{
204 memset(to, 0, sizeof *to);
205
206 if (copy_from_user(to, from, __ARCH_SI_PREAMBLE_SIZE) || 205 if (copy_from_user(to, from, __ARCH_SI_PREAMBLE_SIZE) ||
207 copy_from_user(to->_sifields._pad, 206 copy_from_user(to->_sifields._pad,
208 from->_sifields._pad, SI_PAD_SIZE)) 207 from->_sifields._pad, SI_PAD_SIZE))
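The added si_signo check matters because si_code values are only unique per signal: the numeric value of BUS_MCEERR_AR is reused by unrelated codes of other signals, so testing si_code alone could copy the si_addr_lsb union member from a siginfo that never populated it. A hedged sketch of the guarded copy (copy_addr_lsb is an illustrative helper, not a function in this file):

    #include <linux/compat.h>
    #include <linux/signal.h>

    /* si_addr_lsb is only meaningful for SIGBUS with a memory-failure code,
     * so both fields must be checked before reading it.
     */
    static int copy_addr_lsb(compat_siginfo_t __user *to, const siginfo_t *from)
    {
        if (from->si_signo == SIGBUS &&
            (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
            return __put_user(from->si_addr_lsb, &to->si_addr_lsb);

        return 0;
    }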
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index ec37ab3f524f..97bc68f4c689 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -199,16 +199,15 @@ up_fail:
199 */ 199 */
200void update_vsyscall(struct timekeeper *tk) 200void update_vsyscall(struct timekeeper *tk)
201{ 201{
202 struct timespec xtime_coarse;
203 u32 use_syscall = strcmp(tk->tkr_mono.clock->name, "arch_sys_counter"); 202 u32 use_syscall = strcmp(tk->tkr_mono.clock->name, "arch_sys_counter");
204 203
205 ++vdso_data->tb_seq_count; 204 ++vdso_data->tb_seq_count;
206 smp_wmb(); 205 smp_wmb();
207 206
208 xtime_coarse = __current_kernel_time();
209 vdso_data->use_syscall = use_syscall; 207 vdso_data->use_syscall = use_syscall;
210 vdso_data->xtime_coarse_sec = xtime_coarse.tv_sec; 208 vdso_data->xtime_coarse_sec = tk->xtime_sec;
211 vdso_data->xtime_coarse_nsec = xtime_coarse.tv_nsec; 209 vdso_data->xtime_coarse_nsec = tk->tkr_mono.xtime_nsec >>
210 tk->tkr_mono.shift;
212 vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec; 211 vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec;
213 vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec; 212 vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;
214 213
diff --git a/arch/avr32/include/asm/Kbuild b/arch/avr32/include/asm/Kbuild
index 1d66afdfac07..f61f2dd67464 100644
--- a/arch/avr32/include/asm/Kbuild
+++ b/arch/avr32/include/asm/Kbuild
@@ -12,6 +12,7 @@ generic-y += irq_work.h
12generic-y += local.h 12generic-y += local.h
13generic-y += local64.h 13generic-y += local64.h
14generic-y += mcs_spinlock.h 14generic-y += mcs_spinlock.h
15generic-y += mm-arch-hooks.h
15generic-y += param.h 16generic-y += param.h
16generic-y += percpu.h 17generic-y += percpu.h
17generic-y += preempt.h 18generic-y += preempt.h
diff --git a/arch/avr32/include/asm/mm-arch-hooks.h b/arch/avr32/include/asm/mm-arch-hooks.h
deleted file mode 100644
index 145452ffbdad..000000000000
--- a/arch/avr32/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * Architecture specific mm hooks
3 *
4 * Copyright (C) 2015, IBM Corporation
5 * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _ASM_AVR32_MM_ARCH_HOOKS_H
13#define _ASM_AVR32_MM_ARCH_HOOKS_H
14
15#endif /* _ASM_AVR32_MM_ARCH_HOOKS_H */
diff --git a/arch/avr32/kernel/time.c b/arch/avr32/kernel/time.c
index d0f771be9e96..a124c55733db 100644
--- a/arch/avr32/kernel/time.c
+++ b/arch/avr32/kernel/time.c
@@ -18,6 +18,7 @@
 
 #include <mach/pm.h>
 
+static bool disable_cpu_idle_poll;
 
 static cycle_t read_cycle_count(struct clocksource *cs)
 {
@@ -80,45 +81,45 @@ static int comparator_next_event(unsigned long delta,
 	return 0;
 }
 
-static void comparator_mode(enum clock_event_mode mode,
-			    struct clock_event_device *evdev)
+static int comparator_shutdown(struct clock_event_device *evdev)
 {
-	switch (mode) {
-	case CLOCK_EVT_MODE_ONESHOT:
-		pr_debug("%s: start\n", evdev->name);
-		/* FALLTHROUGH */
-	case CLOCK_EVT_MODE_RESUME:
+	pr_debug("%s: %s\n", __func__, evdev->name);
+	sysreg_write(COMPARE, 0);
+
+	if (disable_cpu_idle_poll) {
+		disable_cpu_idle_poll = false;
 		/*
-		 * If we're using the COUNT and COMPARE registers we
-		 * need to force idle poll.
+		 * Only disable idle poll if we have forced that
+		 * in a previous call.
 		 */
-		cpu_idle_poll_ctrl(true);
-		break;
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		sysreg_write(COMPARE, 0);
-		pr_debug("%s: stop\n", evdev->name);
-		if (evdev->mode == CLOCK_EVT_MODE_ONESHOT ||
-		    evdev->mode == CLOCK_EVT_MODE_RESUME) {
-			/*
-			 * Only disable idle poll if we have forced that
-			 * in a previous call.
-			 */
-			cpu_idle_poll_ctrl(false);
-		}
-		break;
-	default:
-		BUG();
+		cpu_idle_poll_ctrl(false);
 	}
+	return 0;
+}
+
+static int comparator_set_oneshot(struct clock_event_device *evdev)
+{
+	pr_debug("%s: %s\n", __func__, evdev->name);
+
+	disable_cpu_idle_poll = true;
+	/*
+	 * If we're using the COUNT and COMPARE registers we
+	 * need to force idle poll.
+	 */
+	cpu_idle_poll_ctrl(true);
+
+	return 0;
 }
 
 static struct clock_event_device comparator = {
 	.name		= "avr32_comparator",
 	.features	= CLOCK_EVT_FEAT_ONESHOT,
 	.shift		= 16,
 	.rating		= 50,
 	.set_next_event	= comparator_next_event,
-	.set_mode	= comparator_mode,
+	.set_state_shutdown = comparator_shutdown,
+	.set_state_oneshot = comparator_set_oneshot,
+	.tick_resume = comparator_set_oneshot,
 };
 
 void read_persistent_clock(struct timespec *ts)
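This conversion replaces the single set_mode() callback with the per-state callbacks that the clockevents core now prefers. A hedged sketch of the new-style registration for a hypothetical one-shot timer (all foo_ names are illustrative):

    #include <linux/clockchips.h>

    static int foo_next_event(unsigned long delta, struct clock_event_device *evt)
    {
        /* program the hardware comparator 'delta' cycles ahead */
        return 0;
    }

    static int foo_shutdown(struct clock_event_device *evt)
    {
        /* quiesce the hardware; replaces set_mode(CLOCK_EVT_MODE_SHUTDOWN) */
        return 0;
    }

    static int foo_set_oneshot(struct clock_event_device *evt)
    {
        /* prepare for one-shot operation; replaces set_mode(CLOCK_EVT_MODE_ONESHOT) */
        return 0;
    }

    static struct clock_event_device foo_clockevent = {
        .name               = "foo_timer",
        .features           = CLOCK_EVT_FEAT_ONESHOT,
        .rating             = 200,
        .set_next_event     = foo_next_event,
        .set_state_shutdown = foo_shutdown,
        .set_state_oneshot  = foo_set_oneshot,
        .tick_resume        = foo_set_oneshot,
    };

Such a device would then typically be registered with clockevents_config_and_register(), passing the clock rate and the minimum and maximum programmable delta.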
diff --git a/arch/avr32/mach-at32ap/clock.c b/arch/avr32/mach-at32ap/clock.c
index 23b1a97fae7a..52c179bec0cc 100644
--- a/arch/avr32/mach-at32ap/clock.c
+++ b/arch/avr32/mach-at32ap/clock.c
@@ -80,6 +80,9 @@ int clk_enable(struct clk *clk)
80{ 80{
81 unsigned long flags; 81 unsigned long flags;
82 82
83 if (!clk)
84 return 0;
85
83 spin_lock_irqsave(&clk_lock, flags); 86 spin_lock_irqsave(&clk_lock, flags);
84 __clk_enable(clk); 87 __clk_enable(clk);
85 spin_unlock_irqrestore(&clk_lock, flags); 88 spin_unlock_irqrestore(&clk_lock, flags);
@@ -106,6 +109,9 @@ void clk_disable(struct clk *clk)
106{ 109{
107 unsigned long flags; 110 unsigned long flags;
108 111
112 if (IS_ERR_OR_NULL(clk))
113 return;
114
109 spin_lock_irqsave(&clk_lock, flags); 115 spin_lock_irqsave(&clk_lock, flags);
110 __clk_disable(clk); 116 __clk_disable(clk);
111 spin_unlock_irqrestore(&clk_lock, flags); 117 spin_unlock_irqrestore(&clk_lock, flags);
@@ -117,6 +123,9 @@ unsigned long clk_get_rate(struct clk *clk)
117 unsigned long flags; 123 unsigned long flags;
118 unsigned long rate; 124 unsigned long rate;
119 125
126 if (!clk)
127 return 0;
128
120 spin_lock_irqsave(&clk_lock, flags); 129 spin_lock_irqsave(&clk_lock, flags);
121 rate = clk->get_rate(clk); 130 rate = clk->get_rate(clk);
122 spin_unlock_irqrestore(&clk_lock, flags); 131 spin_unlock_irqrestore(&clk_lock, flags);
@@ -129,6 +138,9 @@ long clk_round_rate(struct clk *clk, unsigned long rate)
129{ 138{
130 unsigned long flags, actual_rate; 139 unsigned long flags, actual_rate;
131 140
141 if (!clk)
142 return 0;
143
132 if (!clk->set_rate) 144 if (!clk->set_rate)
133 return -ENOSYS; 145 return -ENOSYS;
134 146
@@ -145,6 +157,9 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
145 unsigned long flags; 157 unsigned long flags;
146 long ret; 158 long ret;
147 159
160 if (!clk)
161 return 0;
162
148 if (!clk->set_rate) 163 if (!clk->set_rate)
149 return -ENOSYS; 164 return -ENOSYS;
150 165
@@ -161,6 +176,9 @@ int clk_set_parent(struct clk *clk, struct clk *parent)
161 unsigned long flags; 176 unsigned long flags;
162 int ret; 177 int ret;
163 178
179 if (!clk)
180 return 0;
181
164 if (!clk->set_parent) 182 if (!clk->set_parent)
165 return -ENOSYS; 183 return -ENOSYS;
166 184
@@ -174,7 +192,7 @@ EXPORT_SYMBOL(clk_set_parent);
174 192
175struct clk *clk_get_parent(struct clk *clk) 193struct clk *clk_get_parent(struct clk *clk)
176{ 194{
177 return clk->parent; 195 return !clk ? NULL : clk->parent;
178} 196}
179EXPORT_SYMBOL(clk_get_parent); 197EXPORT_SYMBOL(clk_get_parent);
180 198
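The NULL and IS_ERR checks added throughout this file make the at32ap clk API tolerate consumers that treat a clock as optional. A sketch of the consumer pattern this enables (foo_probe and the "optional" clock id are hypothetical):

    #include <linux/clk.h>
    #include <linux/err.h>
    #include <linux/platform_device.h>

    static int foo_probe(struct platform_device *pdev)
    {
        struct clk *clk = clk_get(&pdev->dev, "optional");

        if (IS_ERR(clk))
            clk = NULL;        /* an absent clock is not fatal here */

        clk_enable(clk);       /* no-op when clk == NULL after this patch */

        /* ...use the device; clk_get_rate(clk) returns 0 when absent... */

        clk_disable(clk);
        if (clk)
            clk_put(clk);
        return 0;
    }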
diff --git a/arch/blackfin/include/asm/Kbuild b/arch/blackfin/include/asm/Kbuild
index 07051a63415d..61cd1e786a14 100644
--- a/arch/blackfin/include/asm/Kbuild
+++ b/arch/blackfin/include/asm/Kbuild
@@ -21,6 +21,7 @@ generic-y += kvm_para.h
21generic-y += local.h 21generic-y += local.h
22generic-y += local64.h 22generic-y += local64.h
23generic-y += mcs_spinlock.h 23generic-y += mcs_spinlock.h
24generic-y += mm-arch-hooks.h
24generic-y += mman.h 25generic-y += mman.h
25generic-y += msgbuf.h 26generic-y += msgbuf.h
26generic-y += mutex.h 27generic-y += mutex.h
diff --git a/arch/blackfin/include/asm/mm-arch-hooks.h b/arch/blackfin/include/asm/mm-arch-hooks.h
deleted file mode 100644
index 1c5211ec338f..000000000000
--- a/arch/blackfin/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * Architecture specific mm hooks
3 *
4 * Copyright (C) 2015, IBM Corporation
5 * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _ASM_BLACKFIN_MM_ARCH_HOOKS_H
13#define _ASM_BLACKFIN_MM_ARCH_HOOKS_H
14
15#endif /* _ASM_BLACKFIN_MM_ARCH_HOOKS_H */
diff --git a/arch/c6x/include/asm/Kbuild b/arch/c6x/include/asm/Kbuild
index 7aeb32272975..f17c4dc6050c 100644
--- a/arch/c6x/include/asm/Kbuild
+++ b/arch/c6x/include/asm/Kbuild
@@ -26,6 +26,7 @@ generic-y += kdebug.h
26generic-y += kmap_types.h 26generic-y += kmap_types.h
27generic-y += local.h 27generic-y += local.h
28generic-y += mcs_spinlock.h 28generic-y += mcs_spinlock.h
29generic-y += mm-arch-hooks.h
29generic-y += mman.h 30generic-y += mman.h
30generic-y += mmu.h 31generic-y += mmu.h
31generic-y += mmu_context.h 32generic-y += mmu_context.h
diff --git a/arch/c6x/include/asm/mm-arch-hooks.h b/arch/c6x/include/asm/mm-arch-hooks.h
deleted file mode 100644
index bb3c4a6ce8e9..000000000000
--- a/arch/c6x/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * Architecture specific mm hooks
3 *
4 * Copyright (C) 2015, IBM Corporation
5 * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _ASM_C6X_MM_ARCH_HOOKS_H
13#define _ASM_C6X_MM_ARCH_HOOKS_H
14
15#endif /* _ASM_C6X_MM_ARCH_HOOKS_H */
diff --git a/arch/cris/include/asm/Kbuild b/arch/cris/include/asm/Kbuild
index d294f6aaff1d..ad2244f35bca 100644
--- a/arch/cris/include/asm/Kbuild
+++ b/arch/cris/include/asm/Kbuild
@@ -18,6 +18,7 @@ generic-y += linkage.h
18generic-y += local.h 18generic-y += local.h
19generic-y += local64.h 19generic-y += local64.h
20generic-y += mcs_spinlock.h 20generic-y += mcs_spinlock.h
21generic-y += mm-arch-hooks.h
21generic-y += module.h 22generic-y += module.h
22generic-y += percpu.h 23generic-y += percpu.h
23generic-y += preempt.h 24generic-y += preempt.h
diff --git a/arch/cris/include/asm/mm-arch-hooks.h b/arch/cris/include/asm/mm-arch-hooks.h
deleted file mode 100644
index 314f774db2b0..000000000000
--- a/arch/cris/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * Architecture specific mm hooks
3 *
4 * Copyright (C) 2015, IBM Corporation
5 * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _ASM_CRIS_MM_ARCH_HOOKS_H
13#define _ASM_CRIS_MM_ARCH_HOOKS_H
14
15#endif /* _ASM_CRIS_MM_ARCH_HOOKS_H */
diff --git a/arch/frv/include/asm/Kbuild b/arch/frv/include/asm/Kbuild
index 30edce31e5c2..8e47b832cc76 100644
--- a/arch/frv/include/asm/Kbuild
+++ b/arch/frv/include/asm/Kbuild
@@ -4,5 +4,6 @@ generic-y += cputime.h
4generic-y += exec.h 4generic-y += exec.h
5generic-y += irq_work.h 5generic-y += irq_work.h
6generic-y += mcs_spinlock.h 6generic-y += mcs_spinlock.h
7generic-y += mm-arch-hooks.h
7generic-y += preempt.h 8generic-y += preempt.h
8generic-y += trace_clock.h 9generic-y += trace_clock.h
diff --git a/arch/frv/include/asm/mm-arch-hooks.h b/arch/frv/include/asm/mm-arch-hooks.h
deleted file mode 100644
index 51d13a870404..000000000000
--- a/arch/frv/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * Architecture specific mm hooks
3 *
4 * Copyright (C) 2015, IBM Corporation
5 * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _ASM_FRV_MM_ARCH_HOOKS_H
13#define _ASM_FRV_MM_ARCH_HOOKS_H
14
15#endif /* _ASM_FRV_MM_ARCH_HOOKS_H */
diff --git a/arch/h8300/include/asm/Kbuild b/arch/h8300/include/asm/Kbuild
index 00379d64f707..70e6ae1e7006 100644
--- a/arch/h8300/include/asm/Kbuild
+++ b/arch/h8300/include/asm/Kbuild
@@ -33,6 +33,7 @@ generic-y += linkage.h
33generic-y += local.h 33generic-y += local.h
34generic-y += local64.h 34generic-y += local64.h
35generic-y += mcs_spinlock.h 35generic-y += mcs_spinlock.h
36generic-y += mm-arch-hooks.h
36generic-y += mman.h 37generic-y += mman.h
37generic-y += mmu.h 38generic-y += mmu.h
38generic-y += mmu_context.h 39generic-y += mmu_context.h
diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild
index 5ade4a163558..daee37bd0999 100644
--- a/arch/hexagon/include/asm/Kbuild
+++ b/arch/hexagon/include/asm/Kbuild
@@ -28,6 +28,7 @@ generic-y += kmap_types.h
28generic-y += local.h 28generic-y += local.h
29generic-y += local64.h 29generic-y += local64.h
30generic-y += mcs_spinlock.h 30generic-y += mcs_spinlock.h
31generic-y += mm-arch-hooks.h
31generic-y += mman.h 32generic-y += mman.h
32generic-y += msgbuf.h 33generic-y += msgbuf.h
33generic-y += pci.h 34generic-y += pci.h
diff --git a/arch/hexagon/include/asm/mm-arch-hooks.h b/arch/hexagon/include/asm/mm-arch-hooks.h
deleted file mode 100644
index 05e8b939e416..000000000000
--- a/arch/hexagon/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * Architecture specific mm hooks
3 *
4 * Copyright (C) 2015, IBM Corporation
5 * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _ASM_HEXAGON_MM_ARCH_HOOKS_H
13#define _ASM_HEXAGON_MM_ARCH_HOOKS_H
14
15#endif /* _ASM_HEXAGON_MM_ARCH_HOOKS_H */
diff --git a/arch/ia64/include/asm/Kbuild b/arch/ia64/include/asm/Kbuild
index ccff13d33fa2..9de3ba12f6b9 100644
--- a/arch/ia64/include/asm/Kbuild
+++ b/arch/ia64/include/asm/Kbuild
@@ -4,6 +4,7 @@ generic-y += exec.h
4generic-y += irq_work.h 4generic-y += irq_work.h
5generic-y += kvm_para.h 5generic-y += kvm_para.h
6generic-y += mcs_spinlock.h 6generic-y += mcs_spinlock.h
7generic-y += mm-arch-hooks.h
7generic-y += preempt.h 8generic-y += preempt.h
8generic-y += trace_clock.h 9generic-y += trace_clock.h
9generic-y += vtime.h 10generic-y += vtime.h
diff --git a/arch/ia64/include/asm/mm-arch-hooks.h b/arch/ia64/include/asm/mm-arch-hooks.h
deleted file mode 100644
index ab4b5c698322..000000000000
--- a/arch/ia64/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * Architecture specific mm hooks
3 *
4 * Copyright (C) 2015, IBM Corporation
5 * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _ASM_IA64_MM_ARCH_HOOKS_H
13#define _ASM_IA64_MM_ARCH_HOOKS_H
14
15#endif /* _ASM_IA64_MM_ARCH_HOOKS_H */
diff --git a/arch/m32r/include/asm/Kbuild b/arch/m32r/include/asm/Kbuild
index ba1cdc018731..e0eb704ca1fa 100644
--- a/arch/m32r/include/asm/Kbuild
+++ b/arch/m32r/include/asm/Kbuild
@@ -4,6 +4,7 @@ generic-y += cputime.h
4generic-y += exec.h 4generic-y += exec.h
5generic-y += irq_work.h 5generic-y += irq_work.h
6generic-y += mcs_spinlock.h 6generic-y += mcs_spinlock.h
7generic-y += mm-arch-hooks.h
7generic-y += module.h 8generic-y += module.h
8generic-y += preempt.h 9generic-y += preempt.h
9generic-y += sections.h 10generic-y += sections.h
diff --git a/arch/m32r/include/asm/io.h b/arch/m32r/include/asm/io.h
index 0c3f25ee3381..f8de767ce2bc 100644
--- a/arch/m32r/include/asm/io.h
+++ b/arch/m32r/include/asm/io.h
@@ -174,6 +174,11 @@ static inline void _writel(unsigned long l, unsigned long addr)
174#define iowrite16 writew 174#define iowrite16 writew
175#define iowrite32 writel 175#define iowrite32 writel
176 176
177#define ioread16be(addr) be16_to_cpu(readw(addr))
178#define ioread32be(addr) be32_to_cpu(readl(addr))
179#define iowrite16be(v, addr) writew(cpu_to_be16(v), (addr))
180#define iowrite32be(v, addr) writel(cpu_to_be32(v), (addr))
181
177#define mmiowb() 182#define mmiowb()
178 183
179#define flush_write_buffers() do { } while (0) /* M32R_FIXME */ 184#define flush_write_buffers() do { } while (0) /* M32R_FIXME */
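These ioreadNNbe/iowriteNNbe definitions give m32r the byte-swapping MMIO accessors that portable drivers already rely on elsewhere. A small usage sketch for a driver with a big-endian register block (the device, register offset and bit are made up):

    #include <linux/io.h>
    #include <linux/types.h>

    #define FOO_CTRL     0x00    /* assumed register offset */
    #define FOO_CTRL_EN  0x01    /* assumed enable bit */

    static void foo_enable(void __iomem *regs)
    {
        /* read-modify-write a 32-bit big-endian control register */
        u32 ctrl = ioread32be(regs + FOO_CTRL);

        iowrite32be(ctrl | FOO_CTRL_EN, regs + FOO_CTRL);
    }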
diff --git a/arch/m32r/include/asm/mm-arch-hooks.h b/arch/m32r/include/asm/mm-arch-hooks.h
deleted file mode 100644
index 6d60b4750f41..000000000000
--- a/arch/m32r/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * Architecture specific mm hooks
3 *
4 * Copyright (C) 2015, IBM Corporation
5 * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _ASM_M32R_MM_ARCH_HOOKS_H
13#define _ASM_M32R_MM_ARCH_HOOKS_H
14
15#endif /* _ASM_M32R_MM_ARCH_HOOKS_H */
diff --git a/arch/m68k/Kconfig.cpu b/arch/m68k/Kconfig.cpu
index 33013dfcd3e1..c496d48a8c8d 100644
--- a/arch/m68k/Kconfig.cpu
+++ b/arch/m68k/Kconfig.cpu
@@ -125,6 +125,13 @@ endif # M68KCLASSIC
125 125
126if COLDFIRE 126if COLDFIRE
127 127
128choice
129 prompt "ColdFire SoC type"
130 default M520x
131 help
132 Select the type of ColdFire System-on-Chip (SoC) that you want
133 to build for.
134
128config M5206 135config M5206
129 bool "MCF5206" 136 bool "MCF5206"
130 depends on !MMU 137 depends on !MMU
@@ -174,9 +181,6 @@ config M525x
174 help 181 help
175 Freescale (Motorola) Coldfire 5251/5253 processor support. 182 Freescale (Motorola) Coldfire 5251/5253 processor support.
176 183
177config M527x
178 bool
179
180config M5271 184config M5271
181 bool "MCF5271" 185 bool "MCF5271"
182 depends on !MMU 186 depends on !MMU
@@ -223,9 +227,6 @@ config M5307
223 help 227 help
224 Motorola ColdFire 5307 processor support. 228 Motorola ColdFire 5307 processor support.
225 229
226config M53xx
227 bool
228
229config M532x 230config M532x
230 bool "MCF532x" 231 bool "MCF532x"
231 depends on !MMU 232 depends on !MMU
@@ -251,9 +252,6 @@ config M5407
251 help 252 help
252 Motorola ColdFire 5407 processor support. 253 Motorola ColdFire 5407 processor support.
253 254
254config M54xx
255 bool
256
257config M547x 255config M547x
258 bool "MCF547x" 256 bool "MCF547x"
259 select M54xx 257 select M54xx
@@ -280,6 +278,17 @@ config M5441x
280 help 278 help
281 Freescale Coldfire 54410/54415/54416/54417/54418 processor support. 279 Freescale Coldfire 54410/54415/54416/54417/54418 processor support.
282 280
281endchoice
282
283config M527x
284 bool
285
286config M53xx
287 bool
288
289config M54xx
290 bool
291
283endif # COLDFIRE 292endif # COLDFIRE
284 293
285 294
@@ -416,22 +425,18 @@ config HAVE_MBAR
416config HAVE_IPSBAR 425config HAVE_IPSBAR
417 bool 426 bool
418 427
419config CLOCK_SET
420 bool "Enable setting the CPU clock frequency"
421 depends on COLDFIRE
422 default n
423 help
424 On some CPU's you do not need to know what the core CPU clock
425 frequency is. On these you can disable clock setting. On some
426 traditional 68K parts, and on all ColdFire parts you need to set
427 the appropriate CPU clock frequency. On these devices many of the
428 onboard peripherals derive their timing from the master CPU clock
429 frequency.
430
431config CLOCK_FREQ 428config CLOCK_FREQ
432 int "Set the core clock frequency" 429 int "Set the core clock frequency"
430 default "25000000" if M5206
431 default "54000000" if M5206e
432 default "166666666" if M520x
433 default "140000000" if M5249
434 default "150000000" if M527x || M523x
435 default "90000000" if M5307
436 default "50000000" if M5407
437 default "266000000" if M54xx
433 default "66666666" 438 default "66666666"
434 depends on CLOCK_SET 439 depends on COLDFIRE
435 help 440 help
436 Define the CPU clock frequency in use. This is the core clock 441 Define the CPU clock frequency in use. This is the core clock
437 frequency, it may or may not be the same as the external clock 442 frequency, it may or may not be the same as the external clock
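With CLOCK_SET removed, CONFIG_CLOCK_FREQ is always defined for ColdFire builds and now defaults to the correct per-SoC value instead of requiring each defconfig to set it. A hedged sketch of how platform code can consume it (the macro name and the divisor formula are illustrative; many ColdFire peripherals clock at half the core frequency):

    #include <linux/kernel.h>

    /* Hypothetical wrapper around the Kconfig-provided core clock, in Hz. */
    #define FOO_CORE_HZ    CONFIG_CLOCK_FREQ

    static unsigned int foo_uart_divisor(unsigned int baud)
    {
        /* peripheral clock = core clock / 2, with 32x oversampling */
        return DIV_ROUND_CLOSEST(FOO_CORE_HZ / 2, 32 * baud);
    }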
diff --git a/arch/m68k/configs/m5208evb_defconfig b/arch/m68k/configs/m5208evb_defconfig
index e7292f460af4..4c7b7938d53a 100644
--- a/arch/m68k/configs/m5208evb_defconfig
+++ b/arch/m68k/configs/m5208evb_defconfig
@@ -1,10 +1,6 @@
1# CONFIG_MMU is not set
2CONFIG_EXPERIMENTAL=y
3CONFIG_LOG_BUF_SHIFT=14 1CONFIG_LOG_BUF_SHIFT=14
4# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
5CONFIG_EXPERT=y 2CONFIG_EXPERT=y
6# CONFIG_KALLSYMS is not set 3# CONFIG_KALLSYMS is not set
7# CONFIG_HOTPLUG is not set
8# CONFIG_FUTEX is not set 4# CONFIG_FUTEX is not set
9# CONFIG_EPOLL is not set 5# CONFIG_EPOLL is not set
10# CONFIG_SIGNALFD is not set 6# CONFIG_SIGNALFD is not set
@@ -16,17 +12,12 @@ CONFIG_EXPERT=y
16# CONFIG_BLK_DEV_BSG is not set 12# CONFIG_BLK_DEV_BSG is not set
17# CONFIG_IOSCHED_DEADLINE is not set 13# CONFIG_IOSCHED_DEADLINE is not set
18# CONFIG_IOSCHED_CFQ is not set 14# CONFIG_IOSCHED_CFQ is not set
19CONFIG_M520x=y 15# CONFIG_MMU is not set
20CONFIG_CLOCK_SET=y
21CONFIG_CLOCK_FREQ=166666666
22CONFIG_CLOCK_DIV=2
23CONFIG_M5208EVB=y
24# CONFIG_4KSTACKS is not set 16# CONFIG_4KSTACKS is not set
25CONFIG_RAMBASE=0x40000000 17CONFIG_RAMBASE=0x40000000
26CONFIG_RAMSIZE=0x2000000 18CONFIG_RAMSIZE=0x2000000
27CONFIG_VECTORBASE=0x40000000 19CONFIG_VECTORBASE=0x40000000
28CONFIG_KERNELBASE=0x40020000 20CONFIG_KERNELBASE=0x40020000
29CONFIG_RAM16BIT=y
30CONFIG_BINFMT_FLAT=y 21CONFIG_BINFMT_FLAT=y
31CONFIG_NET=y 22CONFIG_NET=y
32CONFIG_PACKET=y 23CONFIG_PACKET=y
@@ -40,24 +31,19 @@ CONFIG_INET=y
40# CONFIG_IPV6 is not set 31# CONFIG_IPV6 is not set
41# CONFIG_FW_LOADER is not set 32# CONFIG_FW_LOADER is not set
42CONFIG_MTD=y 33CONFIG_MTD=y
43CONFIG_MTD_CHAR=y
44CONFIG_MTD_BLOCK=y 34CONFIG_MTD_BLOCK=y
45CONFIG_MTD_RAM=y 35CONFIG_MTD_RAM=y
46CONFIG_MTD_UCLINUX=y 36CONFIG_MTD_UCLINUX=y
47CONFIG_BLK_DEV_RAM=y 37CONFIG_BLK_DEV_RAM=y
48# CONFIG_MISC_DEVICES is not set
49CONFIG_NETDEVICES=y 38CONFIG_NETDEVICES=y
50CONFIG_NET_ETHERNET=y
51CONFIG_FEC=y 39CONFIG_FEC=y
52# CONFIG_NETDEV_1000 is not set
53# CONFIG_NETDEV_10000 is not set
54# CONFIG_INPUT is not set 40# CONFIG_INPUT is not set
55# CONFIG_SERIO is not set 41# CONFIG_SERIO is not set
56# CONFIG_VT is not set 42# CONFIG_VT is not set
43# CONFIG_UNIX98_PTYS is not set
57CONFIG_SERIAL_MCF=y 44CONFIG_SERIAL_MCF=y
58CONFIG_SERIAL_MCF_BAUDRATE=115200 45CONFIG_SERIAL_MCF_BAUDRATE=115200
59CONFIG_SERIAL_MCF_CONSOLE=y 46CONFIG_SERIAL_MCF_CONSOLE=y
60# CONFIG_UNIX98_PTYS is not set
61# CONFIG_HW_RANDOM is not set 47# CONFIG_HW_RANDOM is not set
62# CONFIG_HWMON is not set 48# CONFIG_HWMON is not set
63# CONFIG_USB_SUPPORT is not set 49# CONFIG_USB_SUPPORT is not set
@@ -68,8 +54,6 @@ CONFIG_EXT2_FS=y
68CONFIG_ROMFS_FS=y 54CONFIG_ROMFS_FS=y
69CONFIG_ROMFS_BACKED_BY_MTD=y 55CONFIG_ROMFS_BACKED_BY_MTD=y
70# CONFIG_NETWORK_FILESYSTEMS is not set 56# CONFIG_NETWORK_FILESYSTEMS is not set
71# CONFIG_RCU_CPU_STALL_DETECTOR is not set
72CONFIG_SYSCTL_SYSCALL_CHECK=y
73CONFIG_FULLDEBUG=y
74CONFIG_BOOTPARAM=y 57CONFIG_BOOTPARAM=y
75CONFIG_BOOTPARAM_STRING="root=/dev/mtdblock0" 58CONFIG_BOOTPARAM_STRING="root=/dev/mtdblock0"
59CONFIG_FULLDEBUG=y
diff --git a/arch/m68k/configs/m5249evb_defconfig b/arch/m68k/configs/m5249evb_defconfig
index 0cd4b39f325b..a782f368650f 100644
--- a/arch/m68k/configs/m5249evb_defconfig
+++ b/arch/m68k/configs/m5249evb_defconfig
@@ -1,10 +1,6 @@
1# CONFIG_MMU is not set
2CONFIG_EXPERIMENTAL=y
3CONFIG_LOG_BUF_SHIFT=14 1CONFIG_LOG_BUF_SHIFT=14
4# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
5CONFIG_EXPERT=y 2CONFIG_EXPERT=y
6# CONFIG_KALLSYMS is not set 3# CONFIG_KALLSYMS is not set
7# CONFIG_HOTPLUG is not set
8# CONFIG_FUTEX is not set 4# CONFIG_FUTEX is not set
9# CONFIG_EPOLL is not set 5# CONFIG_EPOLL is not set
10# CONFIG_SIGNALFD is not set 6# CONFIG_SIGNALFD is not set
@@ -16,10 +12,8 @@ CONFIG_EXPERT=y
16# CONFIG_BLK_DEV_BSG is not set 12# CONFIG_BLK_DEV_BSG is not set
17# CONFIG_IOSCHED_DEADLINE is not set 13# CONFIG_IOSCHED_DEADLINE is not set
18# CONFIG_IOSCHED_CFQ is not set 14# CONFIG_IOSCHED_CFQ is not set
15# CONFIG_MMU is not set
19CONFIG_M5249=y 16CONFIG_M5249=y
20CONFIG_CLOCK_SET=y
21CONFIG_CLOCK_FREQ=140000000
22CONFIG_CLOCK_DIV=2
23CONFIG_M5249C3=y 17CONFIG_M5249C3=y
24CONFIG_RAMBASE=0x00000000 18CONFIG_RAMBASE=0x00000000
25CONFIG_RAMSIZE=0x00800000 19CONFIG_RAMSIZE=0x00800000
@@ -38,23 +32,18 @@ CONFIG_INET=y
38# CONFIG_IPV6 is not set 32# CONFIG_IPV6 is not set
39# CONFIG_FW_LOADER is not set 33# CONFIG_FW_LOADER is not set
40CONFIG_MTD=y 34CONFIG_MTD=y
41CONFIG_MTD_CHAR=y
42CONFIG_MTD_BLOCK=y 35CONFIG_MTD_BLOCK=y
43CONFIG_MTD_RAM=y 36CONFIG_MTD_RAM=y
44CONFIG_MTD_UCLINUX=y 37CONFIG_MTD_UCLINUX=y
45CONFIG_BLK_DEV_RAM=y 38CONFIG_BLK_DEV_RAM=y
46# CONFIG_MISC_DEVICES is not set
47CONFIG_NETDEVICES=y 39CONFIG_NETDEVICES=y
48CONFIG_NET_ETHERNET=y
49# CONFIG_NETDEV_1000 is not set
50# CONFIG_NETDEV_10000 is not set
51CONFIG_PPP=y 40CONFIG_PPP=y
52# CONFIG_INPUT is not set 41# CONFIG_INPUT is not set
53# CONFIG_SERIO is not set 42# CONFIG_SERIO is not set
54# CONFIG_VT is not set 43# CONFIG_VT is not set
44# CONFIG_UNIX98_PTYS is not set
55CONFIG_SERIAL_MCF=y 45CONFIG_SERIAL_MCF=y
56CONFIG_SERIAL_MCF_CONSOLE=y 46CONFIG_SERIAL_MCF_CONSOLE=y
57# CONFIG_UNIX98_PTYS is not set
58# CONFIG_HWMON is not set 47# CONFIG_HWMON is not set
59# CONFIG_USB_SUPPORT is not set 48# CONFIG_USB_SUPPORT is not set
60CONFIG_EXT2_FS=y 49CONFIG_EXT2_FS=y
@@ -62,7 +51,5 @@ CONFIG_EXT2_FS=y
62CONFIG_ROMFS_FS=y 51CONFIG_ROMFS_FS=y
63CONFIG_ROMFS_BACKED_BY_MTD=y 52CONFIG_ROMFS_BACKED_BY_MTD=y
64# CONFIG_NETWORK_FILESYSTEMS is not set 53# CONFIG_NETWORK_FILESYSTEMS is not set
65# CONFIG_RCU_CPU_STALL_DETECTOR is not set
66CONFIG_BOOTPARAM=y 54CONFIG_BOOTPARAM=y
67CONFIG_BOOTPARAM_STRING="root=/dev/mtdblock0" 55CONFIG_BOOTPARAM_STRING="root=/dev/mtdblock0"
68# CONFIG_CRC32 is not set
diff --git a/arch/m68k/configs/m5272c3_defconfig b/arch/m68k/configs/m5272c3_defconfig
index a60cb3509135..6f5fb92f5cbf 100644
--- a/arch/m68k/configs/m5272c3_defconfig
+++ b/arch/m68k/configs/m5272c3_defconfig
@@ -1,10 +1,6 @@
1# CONFIG_MMU is not set
2CONFIG_EXPERIMENTAL=y
3CONFIG_LOG_BUF_SHIFT=14 1CONFIG_LOG_BUF_SHIFT=14
4# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
5CONFIG_EXPERT=y 2CONFIG_EXPERT=y
6# CONFIG_KALLSYMS is not set 3# CONFIG_KALLSYMS is not set
7# CONFIG_HOTPLUG is not set
8# CONFIG_FUTEX is not set 4# CONFIG_FUTEX is not set
9# CONFIG_EPOLL is not set 5# CONFIG_EPOLL is not set
10# CONFIG_SIGNALFD is not set 6# CONFIG_SIGNALFD is not set
@@ -16,8 +12,8 @@ CONFIG_EXPERT=y
16# CONFIG_BLK_DEV_BSG is not set 12# CONFIG_BLK_DEV_BSG is not set
17# CONFIG_IOSCHED_DEADLINE is not set 13# CONFIG_IOSCHED_DEADLINE is not set
18# CONFIG_IOSCHED_CFQ is not set 14# CONFIG_IOSCHED_CFQ is not set
15# CONFIG_MMU is not set
19CONFIG_M5272=y 16CONFIG_M5272=y
20CONFIG_CLOCK_SET=y
21CONFIG_M5272C3=y 17CONFIG_M5272C3=y
22CONFIG_RAMBASE=0x00000000 18CONFIG_RAMBASE=0x00000000
23CONFIG_RAMSIZE=0x00800000 19CONFIG_RAMSIZE=0x00800000
@@ -36,23 +32,18 @@ CONFIG_INET=y
36# CONFIG_IPV6 is not set 32# CONFIG_IPV6 is not set
37# CONFIG_FW_LOADER is not set 33# CONFIG_FW_LOADER is not set
38CONFIG_MTD=y 34CONFIG_MTD=y
39CONFIG_MTD_CHAR=y
40CONFIG_MTD_BLOCK=y 35CONFIG_MTD_BLOCK=y
41CONFIG_MTD_RAM=y 36CONFIG_MTD_RAM=y
42CONFIG_MTD_UCLINUX=y 37CONFIG_MTD_UCLINUX=y
43CONFIG_BLK_DEV_RAM=y 38CONFIG_BLK_DEV_RAM=y
44# CONFIG_MISC_DEVICES is not set
45CONFIG_NETDEVICES=y 39CONFIG_NETDEVICES=y
46CONFIG_NET_ETHERNET=y
47CONFIG_FEC=y 40CONFIG_FEC=y
48# CONFIG_NETDEV_1000 is not set
49# CONFIG_NETDEV_10000 is not set
50# CONFIG_INPUT is not set 41# CONFIG_INPUT is not set
51# CONFIG_SERIO is not set 42# CONFIG_SERIO is not set
52# CONFIG_VT is not set 43# CONFIG_VT is not set
44# CONFIG_UNIX98_PTYS is not set
53CONFIG_SERIAL_MCF=y 45CONFIG_SERIAL_MCF=y
54CONFIG_SERIAL_MCF_CONSOLE=y 46CONFIG_SERIAL_MCF_CONSOLE=y
55# CONFIG_UNIX98_PTYS is not set
56# CONFIG_HWMON is not set 47# CONFIG_HWMON is not set
57# CONFIG_USB_SUPPORT is not set 48# CONFIG_USB_SUPPORT is not set
58CONFIG_EXT2_FS=y 49CONFIG_EXT2_FS=y
@@ -61,6 +52,5 @@ CONFIG_EXT2_FS=y
61CONFIG_ROMFS_FS=y 52CONFIG_ROMFS_FS=y
62CONFIG_ROMFS_BACKED_BY_MTD=y 53CONFIG_ROMFS_BACKED_BY_MTD=y
63# CONFIG_NETWORK_FILESYSTEMS is not set 54# CONFIG_NETWORK_FILESYSTEMS is not set
64# CONFIG_RCU_CPU_STALL_DETECTOR is not set
65CONFIG_BOOTPARAM=y 55CONFIG_BOOTPARAM=y
66CONFIG_BOOTPARAM_STRING="root=/dev/mtdblock0" 56CONFIG_BOOTPARAM_STRING="root=/dev/mtdblock0"
diff --git a/arch/m68k/configs/m5275evb_defconfig b/arch/m68k/configs/m5275evb_defconfig
index e6502ab7cb2f..b5d7cd1ce856 100644
--- a/arch/m68k/configs/m5275evb_defconfig
+++ b/arch/m68k/configs/m5275evb_defconfig
@@ -1,10 +1,6 @@
1# CONFIG_MMU is not set
2CONFIG_EXPERIMENTAL=y
3CONFIG_LOG_BUF_SHIFT=14 1CONFIG_LOG_BUF_SHIFT=14
4# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
5CONFIG_EXPERT=y 2CONFIG_EXPERT=y
6# CONFIG_KALLSYMS is not set 3# CONFIG_KALLSYMS is not set
7# CONFIG_HOTPLUG is not set
8# CONFIG_FUTEX is not set 4# CONFIG_FUTEX is not set
9# CONFIG_EPOLL is not set 5# CONFIG_EPOLL is not set
10# CONFIG_SIGNALFD is not set 6# CONFIG_SIGNALFD is not set
@@ -16,11 +12,8 @@ CONFIG_EXPERT=y
16# CONFIG_BLK_DEV_BSG is not set 12# CONFIG_BLK_DEV_BSG is not set
17# CONFIG_IOSCHED_DEADLINE is not set 13# CONFIG_IOSCHED_DEADLINE is not set
18# CONFIG_IOSCHED_CFQ is not set 14# CONFIG_IOSCHED_CFQ is not set
15# CONFIG_MMU is not set
19CONFIG_M5275=y 16CONFIG_M5275=y
20CONFIG_CLOCK_SET=y
21CONFIG_CLOCK_FREQ=150000000
22CONFIG_CLOCK_DIV=2
23CONFIG_M5275EVB=y
24# CONFIG_4KSTACKS is not set 17# CONFIG_4KSTACKS is not set
25CONFIG_RAMBASE=0x00000000 18CONFIG_RAMBASE=0x00000000
26CONFIG_RAMSIZE=0x00000000 19CONFIG_RAMSIZE=0x00000000
@@ -39,24 +32,19 @@ CONFIG_INET=y
39# CONFIG_IPV6 is not set 32# CONFIG_IPV6 is not set
40# CONFIG_FW_LOADER is not set 33# CONFIG_FW_LOADER is not set
41CONFIG_MTD=y 34CONFIG_MTD=y
42CONFIG_MTD_CHAR=y
43CONFIG_MTD_BLOCK=y 35CONFIG_MTD_BLOCK=y
44CONFIG_MTD_RAM=y 36CONFIG_MTD_RAM=y
45CONFIG_MTD_UCLINUX=y 37CONFIG_MTD_UCLINUX=y
46CONFIG_BLK_DEV_RAM=y 38CONFIG_BLK_DEV_RAM=y
47# CONFIG_MISC_DEVICES is not set
48CONFIG_NETDEVICES=y 39CONFIG_NETDEVICES=y
49CONFIG_NET_ETHERNET=y
50CONFIG_FEC=y 40CONFIG_FEC=y
51# CONFIG_NETDEV_1000 is not set
52# CONFIG_NETDEV_10000 is not set
53CONFIG_PPP=y 41CONFIG_PPP=y
54# CONFIG_INPUT is not set 42# CONFIG_INPUT is not set
55# CONFIG_SERIO is not set 43# CONFIG_SERIO is not set
56# CONFIG_VT is not set 44# CONFIG_VT is not set
45# CONFIG_UNIX98_PTYS is not set
57CONFIG_SERIAL_MCF=y 46CONFIG_SERIAL_MCF=y
58CONFIG_SERIAL_MCF_CONSOLE=y 47CONFIG_SERIAL_MCF_CONSOLE=y
59# CONFIG_UNIX98_PTYS is not set
60# CONFIG_HWMON is not set 48# CONFIG_HWMON is not set
61# CONFIG_USB_SUPPORT is not set 49# CONFIG_USB_SUPPORT is not set
62CONFIG_EXT2_FS=y 50CONFIG_EXT2_FS=y
@@ -65,8 +53,5 @@ CONFIG_EXT2_FS=y
65CONFIG_ROMFS_FS=y 53CONFIG_ROMFS_FS=y
66CONFIG_ROMFS_BACKED_BY_MTD=y 54CONFIG_ROMFS_BACKED_BY_MTD=y
67# CONFIG_NETWORK_FILESYSTEMS is not set 55# CONFIG_NETWORK_FILESYSTEMS is not set
68# CONFIG_RCU_CPU_STALL_DETECTOR is not set
69CONFIG_SYSCTL_SYSCALL_CHECK=y
70CONFIG_BOOTPARAM=y 56CONFIG_BOOTPARAM=y
71CONFIG_BOOTPARAM_STRING="root=/dev/mtdblock0" 57CONFIG_BOOTPARAM_STRING="root=/dev/mtdblock0"
72# CONFIG_CRC32 is not set
diff --git a/arch/m68k/configs/m5307c3_defconfig b/arch/m68k/configs/m5307c3_defconfig
index 023812abd2e6..1b4c09461c40 100644
--- a/arch/m68k/configs/m5307c3_defconfig
+++ b/arch/m68k/configs/m5307c3_defconfig
@@ -1,10 +1,6 @@
1# CONFIG_MMU is not set
2CONFIG_EXPERIMENTAL=y
3CONFIG_LOG_BUF_SHIFT=14 1CONFIG_LOG_BUF_SHIFT=14
4# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
5CONFIG_EXPERT=y 2CONFIG_EXPERT=y
6# CONFIG_KALLSYMS is not set 3# CONFIG_KALLSYMS is not set
7# CONFIG_HOTPLUG is not set
8# CONFIG_FUTEX is not set 4# CONFIG_FUTEX is not set
9# CONFIG_EPOLL is not set 5# CONFIG_EPOLL is not set
10# CONFIG_SIGNALFD is not set 6# CONFIG_SIGNALFD is not set
@@ -16,10 +12,8 @@ CONFIG_EXPERT=y
16# CONFIG_BLK_DEV_BSG is not set 12# CONFIG_BLK_DEV_BSG is not set
17# CONFIG_IOSCHED_DEADLINE is not set 13# CONFIG_IOSCHED_DEADLINE is not set
18# CONFIG_IOSCHED_CFQ is not set 14# CONFIG_IOSCHED_CFQ is not set
15# CONFIG_MMU is not set
19CONFIG_M5307=y 16CONFIG_M5307=y
20CONFIG_CLOCK_SET=y
21CONFIG_CLOCK_FREQ=90000000
22CONFIG_CLOCK_DIV=2
23CONFIG_M5307C3=y 17CONFIG_M5307C3=y
24CONFIG_RAMBASE=0x00000000 18CONFIG_RAMBASE=0x00000000
25CONFIG_RAMSIZE=0x00800000 19CONFIG_RAMSIZE=0x00800000
@@ -38,16 +32,11 @@ CONFIG_INET=y
38# CONFIG_IPV6 is not set 32# CONFIG_IPV6 is not set
39# CONFIG_FW_LOADER is not set 33# CONFIG_FW_LOADER is not set
40CONFIG_MTD=y 34CONFIG_MTD=y
41CONFIG_MTD_CHAR=y
42CONFIG_MTD_BLOCK=y 35CONFIG_MTD_BLOCK=y
43CONFIG_MTD_RAM=y 36CONFIG_MTD_RAM=y
44CONFIG_MTD_UCLINUX=y 37CONFIG_MTD_UCLINUX=y
45CONFIG_BLK_DEV_RAM=y 38CONFIG_BLK_DEV_RAM=y
46# CONFIG_MISC_DEVICES is not set
47CONFIG_NETDEVICES=y 39CONFIG_NETDEVICES=y
48CONFIG_NET_ETHERNET=y
49# CONFIG_NETDEV_1000 is not set
50# CONFIG_NETDEV_10000 is not set
51CONFIG_PPP=y 40CONFIG_PPP=y
52CONFIG_SLIP=y 41CONFIG_SLIP=y
53CONFIG_SLIP_COMPRESSED=y 42CONFIG_SLIP_COMPRESSED=y
@@ -56,21 +45,17 @@ CONFIG_SLIP_COMPRESSED=y
56# CONFIG_INPUT_MOUSE is not set 45# CONFIG_INPUT_MOUSE is not set
57# CONFIG_SERIO is not set 46# CONFIG_SERIO is not set
58# CONFIG_VT is not set 47# CONFIG_VT is not set
48# CONFIG_LEGACY_PTYS is not set
59CONFIG_SERIAL_MCF=y 49CONFIG_SERIAL_MCF=y
60CONFIG_SERIAL_MCF_CONSOLE=y 50CONFIG_SERIAL_MCF_CONSOLE=y
61# CONFIG_LEGACY_PTYS is not set
62# CONFIG_HW_RANDOM is not set 51# CONFIG_HW_RANDOM is not set
63# CONFIG_HWMON is not set 52# CONFIG_HWMON is not set
64# CONFIG_HID_SUPPORT is not set
65# CONFIG_USB_SUPPORT is not set 53# CONFIG_USB_SUPPORT is not set
66CONFIG_EXT2_FS=y 54CONFIG_EXT2_FS=y
67# CONFIG_DNOTIFY is not set 55# CONFIG_DNOTIFY is not set
68CONFIG_ROMFS_FS=y 56CONFIG_ROMFS_FS=y
69CONFIG_ROMFS_BACKED_BY_MTD=y 57CONFIG_ROMFS_BACKED_BY_MTD=y
70# CONFIG_NETWORK_FILESYSTEMS is not set 58# CONFIG_NETWORK_FILESYSTEMS is not set
71# CONFIG_RCU_CPU_STALL_DETECTOR is not set
72CONFIG_SYSCTL_SYSCALL_CHECK=y
73CONFIG_FULLDEBUG=y
74CONFIG_BOOTPARAM=y 59CONFIG_BOOTPARAM=y
75CONFIG_BOOTPARAM_STRING="root=/dev/mtdblock0" 60CONFIG_BOOTPARAM_STRING="root=/dev/mtdblock0"
76# CONFIG_CRC32 is not set 61CONFIG_FULLDEBUG=y
diff --git a/arch/m68k/configs/m5407c3_defconfig b/arch/m68k/configs/m5407c3_defconfig
index 557b39f3be90..275ad543d4bc 100644
--- a/arch/m68k/configs/m5407c3_defconfig
+++ b/arch/m68k/configs/m5407c3_defconfig
@@ -1,10 +1,6 @@
1# CONFIG_MMU is not set
2CONFIG_EXPERIMENTAL=y
3CONFIG_LOG_BUF_SHIFT=14 1CONFIG_LOG_BUF_SHIFT=14
4# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
5CONFIG_EXPERT=y 2CONFIG_EXPERT=y
6# CONFIG_KALLSYMS is not set 3# CONFIG_KALLSYMS is not set
7# CONFIG_HOTPLUG is not set
8# CONFIG_FUTEX is not set 4# CONFIG_FUTEX is not set
9# CONFIG_EPOLL is not set 5# CONFIG_EPOLL is not set
10# CONFIG_SIGNALFD is not set 6# CONFIG_SIGNALFD is not set
@@ -17,9 +13,8 @@ CONFIG_MODULE_UNLOAD=y
17# CONFIG_BLK_DEV_BSG is not set 13# CONFIG_BLK_DEV_BSG is not set
18# CONFIG_IOSCHED_DEADLINE is not set 14# CONFIG_IOSCHED_DEADLINE is not set
19# CONFIG_IOSCHED_CFQ is not set 15# CONFIG_IOSCHED_CFQ is not set
16# CONFIG_MMU is not set
20CONFIG_M5407=y 17CONFIG_M5407=y
21CONFIG_CLOCK_SET=y
22CONFIG_CLOCK_FREQ=50000000
23CONFIG_M5407C3=y 18CONFIG_M5407C3=y
24CONFIG_RAMBASE=0x00000000 19CONFIG_RAMBASE=0x00000000
25CONFIG_RAMSIZE=0x00000000 20CONFIG_RAMSIZE=0x00000000
@@ -38,22 +33,17 @@ CONFIG_INET=y
38# CONFIG_IPV6 is not set 33# CONFIG_IPV6 is not set
39# CONFIG_FW_LOADER is not set 34# CONFIG_FW_LOADER is not set
40CONFIG_MTD=y 35CONFIG_MTD=y
41CONFIG_MTD_CHAR=y
42CONFIG_MTD_BLOCK=y 36CONFIG_MTD_BLOCK=y
43CONFIG_MTD_RAM=y 37CONFIG_MTD_RAM=y
44CONFIG_MTD_UCLINUX=y 38CONFIG_MTD_UCLINUX=y
45CONFIG_BLK_DEV_RAM=y 39CONFIG_BLK_DEV_RAM=y
46# CONFIG_MISC_DEVICES is not set
47CONFIG_NETDEVICES=y 40CONFIG_NETDEVICES=y
48CONFIG_NET_ETHERNET=y
49# CONFIG_NETDEV_1000 is not set
50# CONFIG_NETDEV_10000 is not set
51CONFIG_PPP=y 41CONFIG_PPP=y
52# CONFIG_INPUT is not set 42# CONFIG_INPUT is not set
53# CONFIG_VT is not set 43# CONFIG_VT is not set
44# CONFIG_UNIX98_PTYS is not set
54CONFIG_SERIAL_MCF=y 45CONFIG_SERIAL_MCF=y
55CONFIG_SERIAL_MCF_CONSOLE=y 46CONFIG_SERIAL_MCF_CONSOLE=y
56# CONFIG_UNIX98_PTYS is not set
57# CONFIG_HW_RANDOM is not set 47# CONFIG_HW_RANDOM is not set
58# CONFIG_HWMON is not set 48# CONFIG_HWMON is not set
59# CONFIG_USB_SUPPORT is not set 49# CONFIG_USB_SUPPORT is not set
@@ -63,8 +53,5 @@ CONFIG_EXT2_FS=y
63CONFIG_ROMFS_FS=y 53CONFIG_ROMFS_FS=y
64CONFIG_ROMFS_BACKED_BY_MTD=y 54CONFIG_ROMFS_BACKED_BY_MTD=y
65# CONFIG_NETWORK_FILESYSTEMS is not set 55# CONFIG_NETWORK_FILESYSTEMS is not set
66# CONFIG_RCU_CPU_STALL_DETECTOR is not set
67CONFIG_SYSCTL_SYSCALL_CHECK=y
68CONFIG_BOOTPARAM=y 56CONFIG_BOOTPARAM=y
69CONFIG_BOOTPARAM_STRING="root=/dev/mtdblock0" 57CONFIG_BOOTPARAM_STRING="root=/dev/mtdblock0"
70# CONFIG_CRC32 is not set
diff --git a/arch/m68k/configs/m5475evb_defconfig b/arch/m68k/configs/m5475evb_defconfig
index c5018a68819b..4f4ccd13c11b 100644
--- a/arch/m68k/configs/m5475evb_defconfig
+++ b/arch/m68k/configs/m5475evb_defconfig
@@ -1,11 +1,7 @@
1CONFIG_EXPERIMENTAL=y
2# CONFIG_SWAP is not set 1# CONFIG_SWAP is not set
3CONFIG_LOG_BUF_SHIFT=14 2CONFIG_LOG_BUF_SHIFT=14
4CONFIG_SYSFS_DEPRECATED=y
5CONFIG_SYSFS_DEPRECATED_V2=y
6CONFIG_SYSCTL_SYSCALL=y 3CONFIG_SYSCTL_SYSCALL=y
7# CONFIG_KALLSYMS is not set 4# CONFIG_KALLSYMS is not set
8# CONFIG_HOTPLUG is not set
9# CONFIG_FUTEX is not set 5# CONFIG_FUTEX is not set
10# CONFIG_EPOLL is not set 6# CONFIG_EPOLL is not set
11# CONFIG_SIGNALFD is not set 7# CONFIG_SIGNALFD is not set
@@ -20,19 +16,16 @@ CONFIG_MODULES=y
20# CONFIG_IOSCHED_DEADLINE is not set 16# CONFIG_IOSCHED_DEADLINE is not set
21# CONFIG_IOSCHED_CFQ is not set 17# CONFIG_IOSCHED_CFQ is not set
22CONFIG_COLDFIRE=y 18CONFIG_COLDFIRE=y
23CONFIG_M547x=y
24CONFIG_CLOCK_SET=y
25CONFIG_CLOCK_FREQ=266000000
26# CONFIG_4KSTACKS is not set 19# CONFIG_4KSTACKS is not set
27CONFIG_RAMBASE=0x0 20CONFIG_RAMBASE=0x0
28CONFIG_RAMSIZE=0x2000000 21CONFIG_RAMSIZE=0x2000000
29CONFIG_VECTORBASE=0x0 22CONFIG_VECTORBASE=0x0
30CONFIG_MBAR=0xff000000 23CONFIG_MBAR=0xff000000
31CONFIG_KERNELBASE=0x20000 24CONFIG_KERNELBASE=0x20000
25CONFIG_PCI=y
32# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 26# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
33# CONFIG_FW_LOADER is not set 27# CONFIG_FW_LOADER is not set
34CONFIG_MTD=y 28CONFIG_MTD=y
35CONFIG_MTD_CHAR=y
36CONFIG_MTD_BLOCK=y 29CONFIG_MTD_BLOCK=y
37CONFIG_MTD_CFI=y 30CONFIG_MTD_CFI=y
38CONFIG_MTD_JEDECPROBE=y 31CONFIG_MTD_JEDECPROBE=y
diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild
index 1555bc189c7d..eb85bd9c6180 100644
--- a/arch/m68k/include/asm/Kbuild
+++ b/arch/m68k/include/asm/Kbuild
@@ -18,6 +18,7 @@ generic-y += kvm_para.h
18generic-y += local.h 18generic-y += local.h
19generic-y += local64.h 19generic-y += local64.h
20generic-y += mcs_spinlock.h 20generic-y += mcs_spinlock.h
21generic-y += mm-arch-hooks.h
21generic-y += mman.h 22generic-y += mman.h
22generic-y += mutex.h 23generic-y += mutex.h
23generic-y += percpu.h 24generic-y += percpu.h
diff --git a/arch/m68k/include/asm/coldfire.h b/arch/m68k/include/asm/coldfire.h
index c94557b91448..50aa4dac9ca2 100644
--- a/arch/m68k/include/asm/coldfire.h
+++ b/arch/m68k/include/asm/coldfire.h
@@ -19,7 +19,7 @@
19 * in any case new boards come along from time to time that have yet 19 * in any case new boards come along from time to time that have yet
20 * another different clocking frequency. 20 * another different clocking frequency.
21 */ 21 */
22#ifdef CONFIG_CLOCK_SET 22#ifdef CONFIG_CLOCK_FREQ
23#define MCF_CLK CONFIG_CLOCK_FREQ 23#define MCF_CLK CONFIG_CLOCK_FREQ
24#else 24#else
25#error "Don't know what your ColdFire CPU clock frequency is??" 25#error "Don't know what your ColdFire CPU clock frequency is??"
diff --git a/arch/m68k/include/asm/io_mm.h b/arch/m68k/include/asm/io_mm.h
index 618c85d3c786..f55cad529400 100644
--- a/arch/m68k/include/asm/io_mm.h
+++ b/arch/m68k/include/asm/io_mm.h
@@ -413,7 +413,8 @@ static inline void isa_delay(void)
413#define writew(val, addr) out_le16((addr), (val)) 413#define writew(val, addr) out_le16((addr), (val))
414#endif /* CONFIG_ATARI_ROM_ISA */ 414#endif /* CONFIG_ATARI_ROM_ISA */
415 415
416#if !defined(CONFIG_ISA) && !defined(CONFIG_ATARI_ROM_ISA) 416#if !defined(CONFIG_ISA) && !defined(CONFIG_ATARI_ROM_ISA) && \
417 !(defined(CONFIG_PCI) && defined(CONFIG_COLDFIRE))
417/* 418/*
418 * We need to define dummy functions for GENERIC_IOMAP support. 419 * We need to define dummy functions for GENERIC_IOMAP support.
419 */ 420 */
diff --git a/arch/m68k/include/asm/mm-arch-hooks.h b/arch/m68k/include/asm/mm-arch-hooks.h
deleted file mode 100644
index 7e8709bc90ae..000000000000
--- a/arch/m68k/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * Architecture specific mm hooks
3 *
4 * Copyright (C) 2015, IBM Corporation
5 * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _ASM_M68K_MM_ARCH_HOOKS_H
13#define _ASM_M68K_MM_ARCH_HOOKS_H
14
15#endif /* _ASM_M68K_MM_ARCH_HOOKS_H */
diff --git a/arch/metag/include/asm/Kbuild b/arch/metag/include/asm/Kbuild
index 199320f3c345..df31353fd200 100644
--- a/arch/metag/include/asm/Kbuild
+++ b/arch/metag/include/asm/Kbuild
@@ -25,6 +25,7 @@ generic-y += kvm_para.h
25generic-y += local.h 25generic-y += local.h
26generic-y += local64.h 26generic-y += local64.h
27generic-y += mcs_spinlock.h 27generic-y += mcs_spinlock.h
28generic-y += mm-arch-hooks.h
28generic-y += msgbuf.h 29generic-y += msgbuf.h
29generic-y += mutex.h 30generic-y += mutex.h
30generic-y += param.h 31generic-y += param.h
diff --git a/arch/metag/include/asm/mm-arch-hooks.h b/arch/metag/include/asm/mm-arch-hooks.h
deleted file mode 100644
index b0072b2eb0de..000000000000
--- a/arch/metag/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * Architecture specific mm hooks
3 *
4 * Copyright (C) 2015, IBM Corporation
5 * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _ASM_METAG_MM_ARCH_HOOKS_H
13#define _ASM_METAG_MM_ARCH_HOOKS_H
14
15#endif /* _ASM_METAG_MM_ARCH_HOOKS_H */
diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild
index 9989ddb169ca..2f222f355c4b 100644
--- a/arch/microblaze/include/asm/Kbuild
+++ b/arch/microblaze/include/asm/Kbuild
@@ -6,6 +6,7 @@ generic-y += device.h
6generic-y += exec.h 6generic-y += exec.h
7generic-y += irq_work.h 7generic-y += irq_work.h
8generic-y += mcs_spinlock.h 8generic-y += mcs_spinlock.h
9generic-y += mm-arch-hooks.h
9generic-y += preempt.h 10generic-y += preempt.h
10generic-y += syscalls.h 11generic-y += syscalls.h
11generic-y += trace_clock.h 12generic-y += trace_clock.h
diff --git a/arch/microblaze/include/asm/mm-arch-hooks.h b/arch/microblaze/include/asm/mm-arch-hooks.h
deleted file mode 100644
index 5c4065911bda..000000000000
--- a/arch/microblaze/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * Architecture specific mm hooks
3 *
4 * Copyright (C) 2015, IBM Corporation
5 * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _ASM_MICROBLAZE_MM_ARCH_HOOKS_H
13#define _ASM_MICROBLAZE_MM_ARCH_HOOKS_H
14
15#endif /* _ASM_MICROBLAZE_MM_ARCH_HOOKS_H */
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index aab7e46cadd5..199a8357838c 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -151,7 +151,6 @@ config BMIPS_GENERIC
151 select BCM7120_L2_IRQ 151 select BCM7120_L2_IRQ
152 select BRCMSTB_L2_IRQ 152 select BRCMSTB_L2_IRQ
153 select IRQ_MIPS_CPU 153 select IRQ_MIPS_CPU
154 select RAW_IRQ_ACCESSORS
155 select DMA_NONCOHERENT 154 select DMA_NONCOHERENT
156 select SYS_SUPPORTS_32BIT_KERNEL 155 select SYS_SUPPORTS_32BIT_KERNEL
157 select SYS_SUPPORTS_LITTLE_ENDIAN 156 select SYS_SUPPORTS_LITTLE_ENDIAN
@@ -1427,6 +1426,7 @@ config CPU_MIPS64_R6
1427 select CPU_SUPPORTS_HIGHMEM 1426 select CPU_SUPPORTS_HIGHMEM
1428 select CPU_SUPPORTS_MSA 1427 select CPU_SUPPORTS_MSA
1429 select GENERIC_CSUM 1428 select GENERIC_CSUM
1429 select MIPS_O32_FP64_SUPPORT if MIPS32_O32
1430 help 1430 help
1431 Choose this option to build a kernel for release 6 or later of the 1431 Choose this option to build a kernel for release 6 or later of the
1432 MIPS64 architecture. New MIPS processors, starting with the Warrior 1432 MIPS64 architecture. New MIPS processors, starting with the Warrior
@@ -2262,11 +2262,6 @@ config MIPS_CM
2262config MIPS_CPC 2262config MIPS_CPC
2263 bool 2263 bool
2264 2264
2265config SB1_PASS_1_WORKAROUNDS
2266 bool
2267 depends on CPU_SB1_PASS_1
2268 default y
2269
2270config SB1_PASS_2_WORKAROUNDS 2265config SB1_PASS_2_WORKAROUNDS
2271 bool 2266 bool
2272 depends on CPU_SB1 && (CPU_SB1_PASS_2_2 || CPU_SB1_PASS_2) 2267 depends on CPU_SB1 && (CPU_SB1_PASS_2_2 || CPU_SB1_PASS_2)
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index ae2dd59050f7..252e347958f3 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -181,13 +181,6 @@ cflags-$(CONFIG_CPU_R4000_WORKAROUNDS) += $(call cc-option,-mfix-r4000,)
181cflags-$(CONFIG_CPU_R4400_WORKAROUNDS) += $(call cc-option,-mfix-r4400,) 181cflags-$(CONFIG_CPU_R4400_WORKAROUNDS) += $(call cc-option,-mfix-r4400,)
182cflags-$(CONFIG_CPU_DADDI_WORKAROUNDS) += $(call cc-option,-mno-daddi,) 182cflags-$(CONFIG_CPU_DADDI_WORKAROUNDS) += $(call cc-option,-mno-daddi,)
183 183
184ifdef CONFIG_CPU_SB1
185ifdef CONFIG_SB1_PASS_1_WORKAROUNDS
186KBUILD_AFLAGS_MODULE += -msb1-pass1-workarounds
187KBUILD_CFLAGS_MODULE += -msb1-pass1-workarounds
188endif
189endif
190
191# For smartmips configurations, there are hundreds of warnings due to ISA overrides 184# For smartmips configurations, there are hundreds of warnings due to ISA overrides
192# in assembly and header files. smartmips is only supported for MIPS32r1 onwards 185# in assembly and header files. smartmips is only supported for MIPS32r1 onwards
193# and there is no support for 64-bit. Various '.set mips2' or '.set mips3' or 186# and there is no support for 64-bit. Various '.set mips2' or '.set mips3' or
diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c
index 01a644f174dd..1ba21204ebe0 100644
--- a/arch/mips/ath79/setup.c
+++ b/arch/mips/ath79/setup.c
@@ -190,6 +190,7 @@ int get_c0_perfcount_int(void)
190{ 190{
191 return ATH79_MISC_IRQ(5); 191 return ATH79_MISC_IRQ(5);
192} 192}
193EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
193 194
194unsigned int get_c0_compare_int(void) 195unsigned int get_c0_compare_int(void)
195{ 196{
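A note on the EXPORT_SYMBOL_GPL(get_c0_perfcount_int) additions that recur below (ath79, lantiq, malta, sead3, ralink, pistachio): exporting the hook makes it visible to loadable GPL modules, which matters whenever its consumer (presumably the MIPS perf/hardware-counter code) can be built as a module. A minimal sketch of the pattern; the return value here is illustrative only:

	#include <linux/export.h>

	/*
	 * Platform hook made available to modules.  Returning -1 follows the
	 * "no dedicated performance-counter interrupt" convention seen in the
	 * sead3 hunk below; the value here is purely illustrative.
	 */
	int get_c0_perfcount_int(void)
	{
		return -1;
	}
	EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
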
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
index 56f5d080ef9d..b7fa9ae28c36 100644
--- a/arch/mips/cavium-octeon/smp.c
+++ b/arch/mips/cavium-octeon/smp.c
@@ -42,7 +42,7 @@ static irqreturn_t mailbox_interrupt(int irq, void *dev_id)
42 cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), action); 42 cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), action);
43 43
44 if (action & SMP_CALL_FUNCTION) 44 if (action & SMP_CALL_FUNCTION)
45 smp_call_function_interrupt(); 45 generic_smp_call_function_interrupt();
46 if (action & SMP_RESCHEDULE_YOURSELF) 46 if (action & SMP_RESCHEDULE_YOURSELF)
47 scheduler_ipi(); 47 scheduler_ipi();
48 48
diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild
index 7fe5c61a3cb8..1f8546081d20 100644
--- a/arch/mips/include/asm/Kbuild
+++ b/arch/mips/include/asm/Kbuild
@@ -7,6 +7,7 @@ generic-y += emergency-restart.h
7generic-y += irq_work.h 7generic-y += irq_work.h
8generic-y += local64.h 8generic-y += local64.h
9generic-y += mcs_spinlock.h 9generic-y += mcs_spinlock.h
10generic-y += mm-arch-hooks.h
10generic-y += mutex.h 11generic-y += mutex.h
11generic-y += parport.h 12generic-y += parport.h
12generic-y += percpu.h 13generic-y += percpu.h
diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
index 084780b355aa..1b0625189835 100644
--- a/arch/mips/include/asm/fpu.h
+++ b/arch/mips/include/asm/fpu.h
@@ -74,7 +74,7 @@ static inline int __enable_fpu(enum fpu_mode mode)
74 goto fr_common; 74 goto fr_common;
75 75
76 case FPU_64BIT: 76 case FPU_64BIT:
77#if !(defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6) \ 77#if !(defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) \
78 || defined(CONFIG_64BIT)) 78 || defined(CONFIG_64BIT))
79 /* we only have a 32-bit FPU */ 79 /* we only have a 32-bit FPU */
80 return SIGFPE; 80 return SIGFPE;
diff --git a/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h b/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h
deleted file mode 100644
index 11d3b572b1b3..000000000000
--- a/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h
+++ /dev/null
@@ -1,10 +0,0 @@
1#ifndef __ASM_MACH_BCM63XX_DMA_COHERENCE_H
2#define __ASM_MACH_BCM63XX_DMA_COHERENCE_H
3
4#include <asm/bmips.h>
5
6#define plat_post_dma_flush bmips_post_dma_flush
7
8#include <asm/mach-generic/dma-coherence.h>
9
10#endif /* __ASM_MACH_BCM63XX_DMA_COHERENCE_H */
diff --git a/arch/mips/include/asm/mach-sibyte/war.h b/arch/mips/include/asm/mach-sibyte/war.h
index 0a227d426b9c..520f8fc2c806 100644
--- a/arch/mips/include/asm/mach-sibyte/war.h
+++ b/arch/mips/include/asm/mach-sibyte/war.h
@@ -13,8 +13,7 @@
13#define R4600_V2_HIT_CACHEOP_WAR 0 13#define R4600_V2_HIT_CACHEOP_WAR 0
14#define R5432_CP0_INTERRUPT_WAR 0 14#define R5432_CP0_INTERRUPT_WAR 0
15 15
16#if defined(CONFIG_SB1_PASS_1_WORKAROUNDS) || \ 16#if defined(CONFIG_SB1_PASS_2_WORKAROUNDS)
17 defined(CONFIG_SB1_PASS_2_WORKAROUNDS)
18 17
19#ifndef __ASSEMBLY__ 18#ifndef __ASSEMBLY__
20extern int sb1250_m3_workaround_needed(void); 19extern int sb1250_m3_workaround_needed(void);
diff --git a/arch/mips/include/asm/mm-arch-hooks.h b/arch/mips/include/asm/mm-arch-hooks.h
deleted file mode 100644
index b5609fe8e475..000000000000
--- a/arch/mips/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * Architecture specific mm hooks
3 *
4 * Copyright (C) 2015, IBM Corporation
5 * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _ASM_MIPS_MM_ARCH_HOOKS_H
13#define _ASM_MIPS_MM_ARCH_HOOKS_H
14
15#endif /* _ASM_MIPS_MM_ARCH_HOOKS_H */
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index 9d8106758142..ae8569475264 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -182,8 +182,39 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
182 * Make sure the buddy is global too (if it's !none, 182 * Make sure the buddy is global too (if it's !none,
183 * it better already be global) 183 * it better already be global)
184 */ 184 */
185#ifdef CONFIG_SMP
186 /*
187 * For SMP, multiple CPUs can race, so we need to do
188 * this atomically.
189 */
190#ifdef CONFIG_64BIT
191#define LL_INSN "lld"
192#define SC_INSN "scd"
193#else /* CONFIG_32BIT */
194#define LL_INSN "ll"
195#define SC_INSN "sc"
196#endif
197 unsigned long page_global = _PAGE_GLOBAL;
198 unsigned long tmp;
199
200 __asm__ __volatile__ (
201 " .set push\n"
202 " .set noreorder\n"
203 "1: " LL_INSN " %[tmp], %[buddy]\n"
204 " bnez %[tmp], 2f\n"
205 " or %[tmp], %[tmp], %[global]\n"
206 " " SC_INSN " %[tmp], %[buddy]\n"
207 " beqz %[tmp], 1b\n"
208 " nop\n"
209 "2:\n"
210 " .set pop"
211 : [buddy] "+m" (buddy->pte),
212 [tmp] "=&r" (tmp)
213 : [global] "r" (page_global));
214#else /* !CONFIG_SMP */
185 if (pte_none(*buddy)) 215 if (pte_none(*buddy))
186 pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL; 216 pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
217#endif /* CONFIG_SMP */
187 } 218 }
188#endif 219#endif
189} 220}
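The pgtable.h hunk above replaces the plain read-modify-write of the buddy PTE with an ll/sc loop so that, on SMP, a racing CPU cannot overwrite the _PAGE_GLOBAL update (as the added comment notes). A rough user-space C11 sketch of the same idea, with illustrative names and bit values rather than the kernel's:

	#include <stdatomic.h>
	#include <stdint.h>

	#define PAGE_GLOBAL (1UL << 0)	/* illustrative bit, not the real MIPS value */

	/*
	 * Mark an empty ("none") buddy entry global, atomically.  If another
	 * CPU populates the entry first, back off and leave it alone -- the
	 * role played by the bnez test in the ll/sc loop above.
	 */
	static void buddy_make_global(_Atomic uintptr_t *buddy)
	{
		uintptr_t old = 0;

		while (!atomic_compare_exchange_weak(buddy, &old, old | PAGE_GLOBAL)) {
			if (old != 0)	/* no longer none: nothing to do */
				return;
		}
	}
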
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
index 16f1ea9ab191..03722d4326a1 100644
--- a/arch/mips/include/asm/smp.h
+++ b/arch/mips/include/asm/smp.h
@@ -83,8 +83,6 @@ static inline void __cpu_die(unsigned int cpu)
83extern void play_dead(void); 83extern void play_dead(void);
84#endif 84#endif
85 85
86extern asmlinkage void smp_call_function_interrupt(void);
87
88static inline void arch_send_call_function_single_ipi(int cpu) 86static inline void arch_send_call_function_single_ipi(int cpu)
89{ 87{
90 extern struct plat_smp_ops *mp_ops; /* private */ 88 extern struct plat_smp_ops *mp_ops; /* private */
diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h
index 28d6d9364bd1..a71da576883c 100644
--- a/arch/mips/include/asm/stackframe.h
+++ b/arch/mips/include/asm/stackframe.h
@@ -152,6 +152,31 @@
152 .set noreorder 152 .set noreorder
153 bltz k0, 8f 153 bltz k0, 8f
154 move k1, sp 154 move k1, sp
155#ifdef CONFIG_EVA
156 /*
157 * Flush interAptiv's Return Prediction Stack (RPS) by writing
158 * EntryHi. Toggling Config7.RPS is slower and less portable.
159 *
160 * The RPS isn't automatically flushed when exceptions are
161 * taken, which can result in kernel mode speculative accesses
162 * to user addresses if the RPS mispredicts. That's harmless
163 * when user and kernel share the same address space, but with
164 * EVA the same user segments may be unmapped to kernel mode,
165 * even containing sensitive MMIO regions or invalid memory.
166 *
167 * This can happen when the kernel sets the return address to
168 * ret_from_* and jr's to the exception handler, which looks
169 * more like a tail call than a function call. If nested calls
170 * don't evict the last user address in the RPS, it will
171 * mispredict the return and fetch from a user controlled
172 * address into the icache.
173 *
174 * More recent EVA-capable cores with MAAR to restrict
175 * speculative accesses aren't affected.
176 */
177 MFC0 k0, CP0_ENTRYHI
178 MTC0 k0, CP0_ENTRYHI
179#endif
155 .set reorder 180 .set reorder
156 /* Called from user mode, new stack. */ 181 /* Called from user mode, new stack. */
157 get_saved_sp 182 get_saved_sp
diff --git a/arch/mips/include/uapi/asm/sigcontext.h b/arch/mips/include/uapi/asm/sigcontext.h
index 6c9906f59c6e..9081d88ae44f 100644
--- a/arch/mips/include/uapi/asm/sigcontext.h
+++ b/arch/mips/include/uapi/asm/sigcontext.h
@@ -16,7 +16,7 @@
16 16
17/* 17/*
18 * Keep this struct definition in sync with the sigcontext fragment 18 * Keep this struct definition in sync with the sigcontext fragment
19 * in arch/mips/tools/offset.c 19 * in arch/mips/kernel/asm-offsets.c
20 */ 20 */
21struct sigcontext { 21struct sigcontext {
22 unsigned int sc_regmask; /* Unused */ 22 unsigned int sc_regmask; /* Unused */
@@ -46,7 +46,7 @@ struct sigcontext {
46#include <linux/posix_types.h> 46#include <linux/posix_types.h>
47/* 47/*
48 * Keep this struct definition in sync with the sigcontext fragment 48 * Keep this struct definition in sync with the sigcontext fragment
49 * in arch/mips/tools/offset.c 49 * in arch/mips/kernel/asm-offsets.c
50 * 50 *
51 * Warning: this structure illdefined with sc_badvaddr being just an unsigned 51 * Warning: this structure illdefined with sc_badvaddr being just an unsigned
52 * int so it was changed to unsigned long in 2.6.0-test1. This may break 52 * int so it was changed to unsigned long in 2.6.0-test1. This may break
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index beabe19ff8e5..072fab13645d 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * offset.c: Calculate pt_regs and task_struct offsets. 2 * asm-offsets.c: Calculate pt_regs and task_struct offsets.
3 * 3 *
4 * Copyright (C) 1996 David S. Miller 4 * Copyright (C) 1996 David S. Miller
5 * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003 Ralf Baechle 5 * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003 Ralf Baechle
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index af42e7003f12..baa7b6fc0a60 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -407,7 +407,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
407 .set noat 407 .set noat
408 SAVE_ALL 408 SAVE_ALL
409 FEXPORT(handle_\exception\ext) 409 FEXPORT(handle_\exception\ext)
410 __BUILD_clear_\clear 410 __build_clear_\clear
411 .set at 411 .set at
412 __BUILD_\verbose \exception 412 __BUILD_\verbose \exception
413 move a0, sp 413 move a0, sp
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
index 3e4491aa6d6b..789d7bf4fef3 100644
--- a/arch/mips/kernel/mips-mt-fpaff.c
+++ b/arch/mips/kernel/mips-mt-fpaff.c
@@ -154,7 +154,7 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
154 unsigned long __user *user_mask_ptr) 154 unsigned long __user *user_mask_ptr)
155{ 155{
156 unsigned int real_len; 156 unsigned int real_len;
157 cpumask_t mask; 157 cpumask_t allowed, mask;
158 int retval; 158 int retval;
159 struct task_struct *p; 159 struct task_struct *p;
160 160
@@ -173,7 +173,8 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
173 if (retval) 173 if (retval)
174 goto out_unlock; 174 goto out_unlock;
175 175
176 cpumask_and(&mask, &p->thread.user_cpus_allowed, cpu_possible_mask); 176 cpumask_or(&allowed, &p->thread.user_cpus_allowed, &p->cpus_allowed);
177 cpumask_and(&mask, &allowed, cpu_active_mask);
177 178
178out_unlock: 179out_unlock:
179 read_unlock(&tasklist_lock); 180 read_unlock(&tasklist_lock);
diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c
index b130033838ba..5fcec3032f38 100644
--- a/arch/mips/kernel/prom.c
+++ b/arch/mips/kernel/prom.c
@@ -38,7 +38,7 @@ char *mips_get_machine_name(void)
38 return mips_machine_name; 38 return mips_machine_name;
39} 39}
40 40
41#ifdef CONFIG_OF 41#ifdef CONFIG_USE_OF
42void __init early_init_dt_add_memory_arch(u64 base, u64 size) 42void __init early_init_dt_add_memory_arch(u64 base, u64 size)
43{ 43{
44 return add_memory_region(base, size, BOOT_MEM_RAM); 44 return add_memory_region(base, size, BOOT_MEM_RAM);
diff --git a/arch/mips/kernel/relocate_kernel.S b/arch/mips/kernel/relocate_kernel.S
index 74bab9ddd0e1..c6bbf2165051 100644
--- a/arch/mips/kernel/relocate_kernel.S
+++ b/arch/mips/kernel/relocate_kernel.S
@@ -24,7 +24,7 @@ LEAF(relocate_new_kernel)
24 24
25process_entry: 25process_entry:
26 PTR_L s2, (s0) 26 PTR_L s2, (s0)
27 PTR_ADD s0, s0, SZREG 27 PTR_ADDIU s0, s0, SZREG
28 28
29 /* 29 /*
30 * In case of a kdump/crash kernel, the indirection page is not 30 * In case of a kdump/crash kernel, the indirection page is not
@@ -61,9 +61,9 @@ copy_word:
61 /* copy page word by word */ 61 /* copy page word by word */
62 REG_L s5, (s2) 62 REG_L s5, (s2)
63 REG_S s5, (s4) 63 REG_S s5, (s4)
64 PTR_ADD s4, s4, SZREG 64 PTR_ADDIU s4, s4, SZREG
65 PTR_ADD s2, s2, SZREG 65 PTR_ADDIU s2, s2, SZREG
66 LONG_SUB s6, s6, 1 66 LONG_ADDIU s6, s6, -1
67 beq s6, zero, process_entry 67 beq s6, zero, process_entry
68 b copy_word 68 b copy_word
69 b process_entry 69 b process_entry
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index ad4d44635c76..a6f6b762c47a 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -80,7 +80,7 @@ syscall_trace_entry:
80 SAVE_STATIC 80 SAVE_STATIC
81 move s0, t2 81 move s0, t2
82 move a0, sp 82 move a0, sp
83 daddiu a1, v0, __NR_64_Linux 83 move a1, v0
84 jal syscall_trace_enter 84 jal syscall_trace_enter
85 85
86 bltz v0, 2f # seccomp failed? Skip syscall 86 bltz v0, 2f # seccomp failed? Skip syscall
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 446cc654da56..4b2010654c46 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -72,7 +72,7 @@ n32_syscall_trace_entry:
72 SAVE_STATIC 72 SAVE_STATIC
73 move s0, t2 73 move s0, t2
74 move a0, sp 74 move a0, sp
75 daddiu a1, v0, __NR_N32_Linux 75 move a1, v0
76 jal syscall_trace_enter 76 jal syscall_trace_enter
77 77
78 bltz v0, 2f # seccomp failed? Skip syscall 78 bltz v0, 2f # seccomp failed? Skip syscall
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index 19a7705f2a01..5d7f2634996f 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -409,8 +409,6 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
409 409
410int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) 410int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
411{ 411{
412 memset(to, 0, sizeof *to);
413
414 if (copy_from_user(to, from, 3*sizeof(int)) || 412 if (copy_from_user(to, from, 3*sizeof(int)) ||
415 copy_from_user(to->_sifields._pad, 413 copy_from_user(to->_sifields._pad,
416 from->_sifields._pad, SI_PAD_SIZE32)) 414 from->_sifields._pad, SI_PAD_SIZE32))
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index 336708ae5c5b..78cf8c2f1de0 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -284,7 +284,7 @@ static irqreturn_t bmips5000_ipi_interrupt(int irq, void *dev_id)
284 if (action == 0) 284 if (action == 0)
285 scheduler_ipi(); 285 scheduler_ipi();
286 else 286 else
287 smp_call_function_interrupt(); 287 generic_smp_call_function_interrupt();
288 288
289 return IRQ_HANDLED; 289 return IRQ_HANDLED;
290} 290}
@@ -336,7 +336,7 @@ static irqreturn_t bmips43xx_ipi_interrupt(int irq, void *dev_id)
336 if (action & SMP_RESCHEDULE_YOURSELF) 336 if (action & SMP_RESCHEDULE_YOURSELF)
337 scheduler_ipi(); 337 scheduler_ipi();
338 if (action & SMP_CALL_FUNCTION) 338 if (action & SMP_CALL_FUNCTION)
339 smp_call_function_interrupt(); 339 generic_smp_call_function_interrupt();
340 340
341 return IRQ_HANDLED; 341 return IRQ_HANDLED;
342} 342}
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index d0744cc77ea7..a31896c33716 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -192,16 +192,6 @@ asmlinkage void start_secondary(void)
192 cpu_startup_entry(CPUHP_ONLINE); 192 cpu_startup_entry(CPUHP_ONLINE);
193} 193}
194 194
195/*
196 * Call into both interrupt handlers, as we share the IPI for them
197 */
198void __irq_entry smp_call_function_interrupt(void)
199{
200 irq_enter();
201 generic_smp_call_function_interrupt();
202 irq_exit();
203}
204
205static void stop_this_cpu(void *dummy) 195static void stop_this_cpu(void *dummy)
206{ 196{
207 /* 197 /*
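The smp.c hunk above removes the MIPS-local smp_call_function_interrupt() wrapper, and the rest of the series adjusts its callers in one of two ways: handlers registered through the generic IRQ layer (which already runs them inside irq_enter()/irq_exit()) call generic_smp_call_function_interrupt() directly, while low-level mailbox/dispatch paths open-code the irq_enter()/irq_exit() pair themselves. A condensed sketch of the two call sites, with illustrative names and bits:

	#include <linux/hardirq.h>
	#include <linux/interrupt.h>
	#include <linux/smp.h>

	/* 1. A registered IRQ handler: the IRQ core has already done irq_enter(). */
	static irqreturn_t example_ipi_call_interrupt(int irq, void *dev_id)
	{
		generic_smp_call_function_interrupt();
		return IRQ_HANDLED;
	}

	/* 2. A raw mailbox dispatcher: account for the interrupt context itself. */
	static void example_mailbox_dispatch(unsigned long action)
	{
		if (action & 0x1) {		/* illustrative SMP_CALL_FUNCTION bit */
			irq_enter();
			generic_smp_call_function_interrupt();
			irq_exit();
		}
	}
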
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index e207a43b5f8f..8ea28e6ab37d 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -192,6 +192,7 @@ static void show_stacktrace(struct task_struct *task,
192void show_stack(struct task_struct *task, unsigned long *sp) 192void show_stack(struct task_struct *task, unsigned long *sp)
193{ 193{
194 struct pt_regs regs; 194 struct pt_regs regs;
195 mm_segment_t old_fs = get_fs();
195 if (sp) { 196 if (sp) {
196 regs.regs[29] = (unsigned long)sp; 197 regs.regs[29] = (unsigned long)sp;
197 regs.regs[31] = 0; 198 regs.regs[31] = 0;
@@ -210,7 +211,13 @@ void show_stack(struct task_struct *task, unsigned long *sp)
210 prepare_frametrace(&regs); 211 prepare_frametrace(&regs);
211 } 212 }
212 } 213 }
214 /*
215 * show_stack() deals exclusively with kernel mode, so be sure to access
216 * the stack in the kernel (not user) address space.
217 */
218 set_fs(KERNEL_DS);
213 show_stacktrace(task, &regs); 219 show_stacktrace(task, &regs);
220 set_fs(old_fs);
214} 221}
215 222
216static void show_code(unsigned int __user *pc) 223static void show_code(unsigned int __user *pc)
@@ -1519,6 +1526,7 @@ asmlinkage void do_mcheck(struct pt_regs *regs)
1519 const int field = 2 * sizeof(unsigned long); 1526 const int field = 2 * sizeof(unsigned long);
1520 int multi_match = regs->cp0_status & ST0_TS; 1527 int multi_match = regs->cp0_status & ST0_TS;
1521 enum ctx_state prev_state; 1528 enum ctx_state prev_state;
1529 mm_segment_t old_fs = get_fs();
1522 1530
1523 prev_state = exception_enter(); 1531 prev_state = exception_enter();
1524 show_regs(regs); 1532 show_regs(regs);
@@ -1540,8 +1548,13 @@ asmlinkage void do_mcheck(struct pt_regs *regs)
1540 dump_tlb_all(); 1548 dump_tlb_all();
1541 } 1549 }
1542 1550
1551 if (!user_mode(regs))
1552 set_fs(KERNEL_DS);
1553
1543 show_code((unsigned int __user *) regs->cp0_epc); 1554 show_code((unsigned int __user *) regs->cp0_epc);
1544 1555
1556 set_fs(old_fs);
1557
1545 /* 1558 /*
1546 * Some chips may have other causes of machine check (e.g. SB1 1559 * Some chips may have other causes of machine check (e.g. SB1
1547 * graduation timer) 1560 * graduation timer)
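The show_stack()/do_mcheck() changes above follow a common pattern: code that dumps kernel memory through the uaccess helpers temporarily widens the address limit with set_fs(KERNEL_DS) and restores it afterwards, so __get_user() is allowed to read kernel-mode addresses. A minimal sketch of that pattern (illustrative helper, not taken from the patch):

	#include <linux/printk.h>
	#include <linux/uaccess.h>

	/* Dump n words from a kernel stack pointer via the uaccess helpers. */
	static void dump_kernel_words(unsigned long *sp, int n)
	{
		mm_segment_t old_fs = get_fs();
		unsigned long word;
		int i;

		set_fs(KERNEL_DS);		/* let __get_user() accept kernel addresses */
		for (i = 0; i < n; i++) {
			if (__get_user(word, (unsigned long __user *)(sp + i)))
				break;
			pr_cont(" %08lx", word);
		}
		set_fs(old_fs);			/* always restore the caller's limit */
	}
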
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index af84bef0c90d..eb3efd137fd1 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -438,7 +438,7 @@ do { \
438 : "memory"); \ 438 : "memory"); \
439} while(0) 439} while(0)
440 440
441#define StoreDW(addr, value, res) \ 441#define _StoreDW(addr, value, res) \
442do { \ 442do { \
443 __asm__ __volatile__ ( \ 443 __asm__ __volatile__ ( \
444 ".set\tpush\n\t" \ 444 ".set\tpush\n\t" \
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
index 6ab10573490d..2c218c3bbca5 100644
--- a/arch/mips/lantiq/irq.c
+++ b/arch/mips/lantiq/irq.c
@@ -293,7 +293,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
293 293
294static irqreturn_t ipi_call_interrupt(int irq, void *dev_id) 294static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
295{ 295{
296 smp_call_function_interrupt(); 296 generic_smp_call_function_interrupt();
297 return IRQ_HANDLED; 297 return IRQ_HANDLED;
298} 298}
299 299
@@ -466,6 +466,7 @@ int get_c0_perfcount_int(void)
466{ 466{
467 return ltq_perfcount_irq; 467 return ltq_perfcount_irq;
468} 468}
469EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
469 470
470unsigned int get_c0_compare_int(void) 471unsigned int get_c0_compare_int(void)
471{ 472{
diff --git a/arch/mips/loongson64/loongson-3/smp.c b/arch/mips/loongson64/loongson-3/smp.c
index 509877c6e9d9..1a4738a8f2d3 100644
--- a/arch/mips/loongson64/loongson-3/smp.c
+++ b/arch/mips/loongson64/loongson-3/smp.c
@@ -266,8 +266,11 @@ void loongson3_ipi_interrupt(struct pt_regs *regs)
266 if (action & SMP_RESCHEDULE_YOURSELF) 266 if (action & SMP_RESCHEDULE_YOURSELF)
267 scheduler_ipi(); 267 scheduler_ipi();
268 268
269 if (action & SMP_CALL_FUNCTION) 269 if (action & SMP_CALL_FUNCTION) {
270 smp_call_function_interrupt(); 270 irq_enter();
271 generic_smp_call_function_interrupt();
272 irq_exit();
273 }
271 274
272 if (action & SMP_ASK_C0COUNT) { 275 if (action & SMP_ASK_C0COUNT) {
273 BUG_ON(cpu != 0); 276 BUG_ON(cpu != 0);
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 77d96db8253c..aab218c36e0d 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -160,18 +160,18 @@ static inline void setup_protection_map(void)
160 protection_map[1] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC); 160 protection_map[1] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
161 protection_map[2] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ); 161 protection_map[2] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
162 protection_map[3] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC); 162 protection_map[3] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
163 protection_map[4] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ); 163 protection_map[4] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
164 protection_map[5] = __pgprot(_page_cachable_default | _PAGE_PRESENT); 164 protection_map[5] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
165 protection_map[6] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ); 165 protection_map[6] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
166 protection_map[7] = __pgprot(_page_cachable_default | _PAGE_PRESENT); 166 protection_map[7] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
167 167
168 protection_map[8] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ); 168 protection_map[8] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
169 protection_map[9] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC); 169 protection_map[9] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
170 protection_map[10] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | _PAGE_NO_READ); 170 protection_map[10] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | _PAGE_NO_READ);
171 protection_map[11] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE); 171 protection_map[11] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
172 protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ); 172 protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
173 protection_map[13] = __pgprot(_page_cachable_default | _PAGE_PRESENT); 173 protection_map[13] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
174 protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE | _PAGE_NO_READ); 174 protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);
175 protection_map[15] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE); 175 protection_map[15] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);
176 176
177 } else { 177 } else {
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 36c0f26fac6b..852a41c6da45 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -133,7 +133,8 @@ good_area:
133#endif 133#endif
134 goto bad_area; 134 goto bad_area;
135 } 135 }
136 if (!(vma->vm_flags & VM_READ)) { 136 if (!(vma->vm_flags & VM_READ) &&
137 exception_epc(regs) != address) {
137#if 0 138#if 0
138 pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] RI violation\n", 139 pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] RI violation\n",
139 raw_smp_processor_id(), 140 raw_smp_processor_id(),
diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c
index d1392f8f5811..fa8f591f3713 100644
--- a/arch/mips/mti-malta/malta-int.c
+++ b/arch/mips/mti-malta/malta-int.c
@@ -222,7 +222,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
222 222
223static irqreturn_t ipi_call_interrupt(int irq, void *dev_id) 223static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
224{ 224{
225 smp_call_function_interrupt(); 225 generic_smp_call_function_interrupt();
226 226
227 return IRQ_HANDLED; 227 return IRQ_HANDLED;
228} 228}
diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c
index 5625b190edc0..b7bf721eabf5 100644
--- a/arch/mips/mti-malta/malta-time.c
+++ b/arch/mips/mti-malta/malta-time.c
@@ -154,6 +154,7 @@ int get_c0_perfcount_int(void)
154 154
155 return mips_cpu_perf_irq; 155 return mips_cpu_perf_irq;
156} 156}
157EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
157 158
158unsigned int get_c0_compare_int(void) 159unsigned int get_c0_compare_int(void)
159{ 160{
@@ -171,14 +172,17 @@ unsigned int get_c0_compare_int(void)
171 172
172static void __init init_rtc(void) 173static void __init init_rtc(void)
173{ 174{
174 /* stop the clock whilst setting it up */ 175 unsigned char freq, ctrl;
175 CMOS_WRITE(RTC_SET | RTC_24H, RTC_CONTROL);
176 176
177 /* 32KHz time base */ 177 /* Set 32KHz time base if not already set */
178 CMOS_WRITE(RTC_REF_CLCK_32KHZ, RTC_FREQ_SELECT); 178 freq = CMOS_READ(RTC_FREQ_SELECT);
179 if ((freq & RTC_DIV_CTL) != RTC_REF_CLCK_32KHZ)
180 CMOS_WRITE(RTC_REF_CLCK_32KHZ, RTC_FREQ_SELECT);
179 181
180 /* start the clock */ 182 /* Ensure SET bit is clear so RTC can run */
181 CMOS_WRITE(RTC_24H, RTC_CONTROL); 183 ctrl = CMOS_READ(RTC_CONTROL);
184 if (ctrl & RTC_SET)
185 CMOS_WRITE(ctrl & ~RTC_SET, RTC_CONTROL);
182} 186}
183 187
184void __init plat_time_init(void) 188void __init plat_time_init(void)
diff --git a/arch/mips/mti-sead3/sead3-time.c b/arch/mips/mti-sead3/sead3-time.c
index e1d69895fb1d..a120b7a5a8fe 100644
--- a/arch/mips/mti-sead3/sead3-time.c
+++ b/arch/mips/mti-sead3/sead3-time.c
@@ -77,6 +77,7 @@ int get_c0_perfcount_int(void)
77 return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq; 77 return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
78 return -1; 78 return -1;
79} 79}
80EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
80 81
81unsigned int get_c0_compare_int(void) 82unsigned int get_c0_compare_int(void)
82{ 83{
diff --git a/arch/mips/netlogic/common/smp.c b/arch/mips/netlogic/common/smp.c
index dc3e327fbbac..f5fff228b347 100644
--- a/arch/mips/netlogic/common/smp.c
+++ b/arch/mips/netlogic/common/smp.c
@@ -86,7 +86,7 @@ void nlm_smp_function_ipi_handler(unsigned int irq, struct irq_desc *desc)
86{ 86{
87 clear_c0_eimr(irq); 87 clear_c0_eimr(irq);
88 ack_c0_eirr(irq); 88 ack_c0_eirr(irq);
89 smp_call_function_interrupt(); 89 generic_smp_call_function_interrupt();
90 set_c0_eimr(irq); 90 set_c0_eimr(irq);
91} 91}
92 92
diff --git a/arch/mips/paravirt/paravirt-smp.c b/arch/mips/paravirt/paravirt-smp.c
index 42181c7105df..f8d3e081b2eb 100644
--- a/arch/mips/paravirt/paravirt-smp.c
+++ b/arch/mips/paravirt/paravirt-smp.c
@@ -114,7 +114,7 @@ static irqreturn_t paravirt_reched_interrupt(int irq, void *dev_id)
114 114
115static irqreturn_t paravirt_function_interrupt(int irq, void *dev_id) 115static irqreturn_t paravirt_function_interrupt(int irq, void *dev_id)
116{ 116{
117 smp_call_function_interrupt(); 117 generic_smp_call_function_interrupt();
118 return IRQ_HANDLED; 118 return IRQ_HANDLED;
119} 119}
120 120
diff --git a/arch/mips/pistachio/time.c b/arch/mips/pistachio/time.c
index 7c73fcb92a10..8a377346f0ca 100644
--- a/arch/mips/pistachio/time.c
+++ b/arch/mips/pistachio/time.c
@@ -26,6 +26,7 @@ int get_c0_perfcount_int(void)
26{ 26{
27 return gic_get_c0_perfcount_int(); 27 return gic_get_c0_perfcount_int();
28} 28}
29EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
29 30
30int get_c0_fdc_int(void) 31int get_c0_fdc_int(void)
31{ 32{
diff --git a/arch/mips/pmcs-msp71xx/msp_smp.c b/arch/mips/pmcs-msp71xx/msp_smp.c
index 10170580a2de..ffa0f7101a97 100644
--- a/arch/mips/pmcs-msp71xx/msp_smp.c
+++ b/arch/mips/pmcs-msp71xx/msp_smp.c
@@ -44,7 +44,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
44 44
45static irqreturn_t ipi_call_interrupt(int irq, void *dev_id) 45static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
46{ 46{
47 smp_call_function_interrupt(); 47 generic_smp_call_function_interrupt();
48 48
49 return IRQ_HANDLED; 49 return IRQ_HANDLED;
50} 50}
diff --git a/arch/mips/ralink/irq.c b/arch/mips/ralink/irq.c
index 53707aacc0f8..8c624a8b9ea2 100644
--- a/arch/mips/ralink/irq.c
+++ b/arch/mips/ralink/irq.c
@@ -89,6 +89,7 @@ int get_c0_perfcount_int(void)
89{ 89{
90 return rt_perfcount_irq; 90 return rt_perfcount_irq;
91} 91}
92EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
92 93
93unsigned int get_c0_compare_int(void) 94unsigned int get_c0_compare_int(void)
94{ 95{
diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c
index 3fbaef97a1b8..16ec4e12daa3 100644
--- a/arch/mips/sgi-ip27/ip27-irq.c
+++ b/arch/mips/sgi-ip27/ip27-irq.c
@@ -107,10 +107,14 @@ static void ip27_do_irq_mask0(void)
107 scheduler_ipi(); 107 scheduler_ipi();
108 } else if (pend0 & (1UL << CPU_CALL_A_IRQ)) { 108 } else if (pend0 & (1UL << CPU_CALL_A_IRQ)) {
109 LOCAL_HUB_CLR_INTR(CPU_CALL_A_IRQ); 109 LOCAL_HUB_CLR_INTR(CPU_CALL_A_IRQ);
110 smp_call_function_interrupt(); 110 irq_enter();
111 generic_smp_call_function_interrupt();
112 irq_exit();
111 } else if (pend0 & (1UL << CPU_CALL_B_IRQ)) { 113 } else if (pend0 & (1UL << CPU_CALL_B_IRQ)) {
112 LOCAL_HUB_CLR_INTR(CPU_CALL_B_IRQ); 114 LOCAL_HUB_CLR_INTR(CPU_CALL_B_IRQ);
113 smp_call_function_interrupt(); 115 irq_enter();
116 generic_smp_call_function_interrupt();
117 irq_exit();
114 } else 118 } else
115#endif 119#endif
116 { 120 {
diff --git a/arch/mips/sibyte/Kconfig b/arch/mips/sibyte/Kconfig
index a8bb972fd9fd..cb9a095f5c5e 100644
--- a/arch/mips/sibyte/Kconfig
+++ b/arch/mips/sibyte/Kconfig
@@ -81,11 +81,6 @@ choice
81 prompt "SiByte SOC Stepping" 81 prompt "SiByte SOC Stepping"
82 depends on SIBYTE_SB1xxx_SOC 82 depends on SIBYTE_SB1xxx_SOC
83 83
84config CPU_SB1_PASS_1
85 bool "1250 Pass1"
86 depends on SIBYTE_SB1250
87 select CPU_HAS_PREFETCH
88
89config CPU_SB1_PASS_2_1250 84config CPU_SB1_PASS_2_1250
90 bool "1250 An" 85 bool "1250 An"
91 depends on SIBYTE_SB1250 86 depends on SIBYTE_SB1250
diff --git a/arch/mips/sibyte/bcm1480/smp.c b/arch/mips/sibyte/bcm1480/smp.c
index af7d44edd9a8..4c71aea25663 100644
--- a/arch/mips/sibyte/bcm1480/smp.c
+++ b/arch/mips/sibyte/bcm1480/smp.c
@@ -29,8 +29,6 @@
29#include <asm/sibyte/bcm1480_regs.h> 29#include <asm/sibyte/bcm1480_regs.h>
30#include <asm/sibyte/bcm1480_int.h> 30#include <asm/sibyte/bcm1480_int.h>
31 31
32extern void smp_call_function_interrupt(void);
33
34/* 32/*
35 * These are routines for dealing with the bcm1480 smp capabilities 33 * These are routines for dealing with the bcm1480 smp capabilities
36 * independent of board/firmware 34 * independent of board/firmware
@@ -184,6 +182,9 @@ void bcm1480_mailbox_interrupt(void)
184 if (action & SMP_RESCHEDULE_YOURSELF) 182 if (action & SMP_RESCHEDULE_YOURSELF)
185 scheduler_ipi(); 183 scheduler_ipi();
186 184
187 if (action & SMP_CALL_FUNCTION) 185 if (action & SMP_CALL_FUNCTION) {
188 smp_call_function_interrupt(); 186 irq_enter();
187 generic_smp_call_function_interrupt();
188 irq_exit();
189 }
189} 190}
diff --git a/arch/mips/sibyte/common/bus_watcher.c b/arch/mips/sibyte/common/bus_watcher.c
index 5581844c9194..41a1d2242211 100644
--- a/arch/mips/sibyte/common/bus_watcher.c
+++ b/arch/mips/sibyte/common/bus_watcher.c
@@ -81,10 +81,7 @@ void check_bus_watcher(void)
81{ 81{
82 u32 status, l2_err, memio_err; 82 u32 status, l2_err, memio_err;
83 83
84#ifdef CONFIG_SB1_PASS_1_WORKAROUNDS 84#if defined(CONFIG_SIBYTE_BCM112X) || defined(CONFIG_SIBYTE_SB1250)
85 /* Destructive read, clears register and interrupt */
86 status = csr_in32(IOADDR(A_SCD_BUS_ERR_STATUS));
87#elif defined(CONFIG_SIBYTE_BCM112X) || defined(CONFIG_SIBYTE_SB1250)
88 /* Use non-destructive register */ 85 /* Use non-destructive register */
89 status = csr_in32(IOADDR(A_SCD_BUS_ERR_STATUS_DEBUG)); 86 status = csr_in32(IOADDR(A_SCD_BUS_ERR_STATUS_DEBUG));
90#elif defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80) 87#elif defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
diff --git a/arch/mips/sibyte/sb1250/setup.c b/arch/mips/sibyte/sb1250/setup.c
index 3c02b2a77ae9..9d3c24efdf4a 100644
--- a/arch/mips/sibyte/sb1250/setup.c
+++ b/arch/mips/sibyte/sb1250/setup.c
@@ -202,12 +202,10 @@ void __init sb1250_setup(void)
202 202
203 switch (war_pass) { 203 switch (war_pass) {
204 case K_SYS_REVISION_BCM1250_PASS1: 204 case K_SYS_REVISION_BCM1250_PASS1:
205#ifndef CONFIG_SB1_PASS_1_WORKAROUNDS
206 printk("@@@@ This is a BCM1250 A0-A2 (Pass 1) board, " 205 printk("@@@@ This is a BCM1250 A0-A2 (Pass 1) board, "
207 "and the kernel doesn't have the proper " 206 "and the kernel doesn't have the proper "
208 "workarounds compiled in. @@@@\n"); 207 "workarounds compiled in. @@@@\n");
209 bad_config = 1; 208 bad_config = 1;
210#endif
211 break; 209 break;
212 case K_SYS_REVISION_BCM1250_PASS2: 210 case K_SYS_REVISION_BCM1250_PASS2:
213 /* Pass 2 - easiest as default for now - so many numbers */ 211 /* Pass 2 - easiest as default for now - so many numbers */
diff --git a/arch/mips/sibyte/sb1250/smp.c b/arch/mips/sibyte/sb1250/smp.c
index c0c4b3f88a08..1cf66f5ff23d 100644
--- a/arch/mips/sibyte/sb1250/smp.c
+++ b/arch/mips/sibyte/sb1250/smp.c
@@ -172,6 +172,9 @@ void sb1250_mailbox_interrupt(void)
172 if (action & SMP_RESCHEDULE_YOURSELF) 172 if (action & SMP_RESCHEDULE_YOURSELF)
173 scheduler_ipi(); 173 scheduler_ipi();
174 174
175 if (action & SMP_CALL_FUNCTION) 175 if (action & SMP_CALL_FUNCTION) {
176 smp_call_function_interrupt(); 176 irq_enter();
177 generic_smp_call_function_interrupt();
178 irq_exit();
179 }
177} 180}
diff --git a/arch/mn10300/include/asm/Kbuild b/arch/mn10300/include/asm/Kbuild
index de30b0c88796..6edb9ee6128e 100644
--- a/arch/mn10300/include/asm/Kbuild
+++ b/arch/mn10300/include/asm/Kbuild
@@ -5,6 +5,7 @@ generic-y += cputime.h
5generic-y += exec.h 5generic-y += exec.h
6generic-y += irq_work.h 6generic-y += irq_work.h
7generic-y += mcs_spinlock.h 7generic-y += mcs_spinlock.h
8generic-y += mm-arch-hooks.h
8generic-y += preempt.h 9generic-y += preempt.h
9generic-y += sections.h 10generic-y += sections.h
10generic-y += trace_clock.h 11generic-y += trace_clock.h
diff --git a/arch/mn10300/include/asm/mm-arch-hooks.h b/arch/mn10300/include/asm/mm-arch-hooks.h
deleted file mode 100644
index e2029a652f4c..000000000000
--- a/arch/mn10300/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * Architecture specific mm hooks
3 *
4 * Copyright (C) 2015, IBM Corporation
5 * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _ASM_MN10300_MM_ARCH_HOOKS_H
13#define _ASM_MN10300_MM_ARCH_HOOKS_H
14
15#endif /* _ASM_MN10300_MM_ARCH_HOOKS_H */
diff --git a/arch/nios2/include/asm/Kbuild b/arch/nios2/include/asm/Kbuild
index 434639d510b3..914864eb5a25 100644
--- a/arch/nios2/include/asm/Kbuild
+++ b/arch/nios2/include/asm/Kbuild
@@ -30,6 +30,7 @@ generic-y += kmap_types.h
30generic-y += kvm_para.h 30generic-y += kvm_para.h
31generic-y += local.h 31generic-y += local.h
32generic-y += mcs_spinlock.h 32generic-y += mcs_spinlock.h
33generic-y += mm-arch-hooks.h
33generic-y += mman.h 34generic-y += mman.h
34generic-y += module.h 35generic-y += module.h
35generic-y += msgbuf.h 36generic-y += msgbuf.h
diff --git a/arch/nios2/include/asm/mm-arch-hooks.h b/arch/nios2/include/asm/mm-arch-hooks.h
deleted file mode 100644
index d7290dc68558..000000000000
--- a/arch/nios2/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * Architecture specific mm hooks
3 *
4 * Copyright (C) 2015, IBM Corporation
5 * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _ASM_NIOS2_MM_ARCH_HOOKS_H
13#define _ASM_NIOS2_MM_ARCH_HOOKS_H
14
15#endif /* _ASM_NIOS2_MM_ARCH_HOOKS_H */
diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
index e5a693b16da2..443f44de1020 100644
--- a/arch/openrisc/Kconfig
+++ b/arch/openrisc/Kconfig
@@ -17,6 +17,7 @@ config OPENRISC
17 select GENERIC_IRQ_SHOW 17 select GENERIC_IRQ_SHOW
18 select GENERIC_IOMAP 18 select GENERIC_IOMAP
19 select GENERIC_CPU_DEVICES 19 select GENERIC_CPU_DEVICES
20 select HAVE_UID16
20 select GENERIC_ATOMIC64 21 select GENERIC_ATOMIC64
21 select GENERIC_CLOCKEVENTS 22 select GENERIC_CLOCKEVENTS
22 select GENERIC_STRNCPY_FROM_USER 23 select GENERIC_STRNCPY_FROM_USER
@@ -31,9 +32,6 @@ config MMU
31config HAVE_DMA_ATTRS 32config HAVE_DMA_ATTRS
32 def_bool y 33 def_bool y
33 34
34config UID16
35 def_bool y
36
37config RWSEM_GENERIC_SPINLOCK 35config RWSEM_GENERIC_SPINLOCK
38 def_bool y 36 def_bool y
39 37
diff --git a/arch/openrisc/include/asm/Kbuild b/arch/openrisc/include/asm/Kbuild
index 2a2e39b8109a..2832f031fb11 100644
--- a/arch/openrisc/include/asm/Kbuild
+++ b/arch/openrisc/include/asm/Kbuild
@@ -36,6 +36,7 @@ generic-y += kmap_types.h
36generic-y += kvm_para.h 36generic-y += kvm_para.h
37generic-y += local.h 37generic-y += local.h
38generic-y += mcs_spinlock.h 38generic-y += mcs_spinlock.h
39generic-y += mm-arch-hooks.h
39generic-y += mman.h 40generic-y += mman.h
40generic-y += module.h 41generic-y += module.h
41generic-y += msgbuf.h 42generic-y += msgbuf.h
diff --git a/arch/openrisc/include/asm/mm-arch-hooks.h b/arch/openrisc/include/asm/mm-arch-hooks.h
deleted file mode 100644
index 6d33cb555fe1..000000000000
--- a/arch/openrisc/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * Architecture specific mm hooks
3 *
4 * Copyright (C) 2015, IBM Corporation
5 * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _ASM_OPENRISC_MM_ARCH_HOOKS_H
13#define _ASM_OPENRISC_MM_ARCH_HOOKS_H
14
15#endif /* _ASM_OPENRISC_MM_ARCH_HOOKS_H */
diff --git a/arch/parisc/include/asm/Kbuild b/arch/parisc/include/asm/Kbuild
index 12b341d04f88..f9b3a81aefcd 100644
--- a/arch/parisc/include/asm/Kbuild
+++ b/arch/parisc/include/asm/Kbuild
@@ -15,6 +15,7 @@ generic-y += kvm_para.h
15generic-y += local.h 15generic-y += local.h
16generic-y += local64.h 16generic-y += local64.h
17generic-y += mcs_spinlock.h 17generic-y += mcs_spinlock.h
18generic-y += mm-arch-hooks.h
18generic-y += mutex.h 19generic-y += mutex.h
19generic-y += param.h 20generic-y += param.h
20generic-y += percpu.h 21generic-y += percpu.h
diff --git a/arch/parisc/include/asm/mm-arch-hooks.h b/arch/parisc/include/asm/mm-arch-hooks.h
deleted file mode 100644
index 654ec63b0ee9..000000000000
--- a/arch/parisc/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * Architecture specific mm hooks
3 *
4 * Copyright (C) 2015, IBM Corporation
5 * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _ASM_PARISC_MM_ARCH_HOOKS_H
13#define _ASM_PARISC_MM_ARCH_HOOKS_H
14
15#endif /* _ASM_PARISC_MM_ARCH_HOOKS_H */
diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
index 3a08eae3318f..3edbb9fc91b4 100644
--- a/arch/parisc/include/asm/pgalloc.h
+++ b/arch/parisc/include/asm/pgalloc.h
@@ -72,7 +72,7 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
72 72
73static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) 73static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
74{ 74{
75 if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED) 75 if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED) {
76 /* 76 /*
77 * This is the permanent pmd attached to the pgd; 77 * This is the permanent pmd attached to the pgd;
78 * cannot free it. 78 * cannot free it.
@@ -81,6 +81,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
81 */ 81 */
82 mm_inc_nr_pmds(mm); 82 mm_inc_nr_pmds(mm);
83 return; 83 return;
84 }
84 free_pages((unsigned long)pmd, PMD_ORDER); 85 free_pages((unsigned long)pmd, PMD_ORDER);
85} 86}
86 87
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index d3a831ac0f92..da50e0c9c57e 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -966,8 +966,6 @@ int copy_siginfo_to_user32(struct compat_siginfo __user *d, const siginfo_t *s)
966 966
967int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from) 967int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
968{ 968{
969 memset(to, 0, sizeof *to);
970
971 if (copy_from_user(to, from, 3*sizeof(int)) || 969 if (copy_from_user(to, from, 3*sizeof(int)) ||
972 copy_from_user(to->_sifields._pad, 970 copy_from_user(to->_sifields._pad,
973 from->_sifields._pad, SI_PAD_SIZE32)) 971 from->_sifields._pad, SI_PAD_SIZE32))
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
index 5cf5e6ea213b..7cf0df859d05 100644
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
@@ -1478,7 +1478,7 @@ static int pnv_eeh_next_error(struct eeh_pe **pe)
1478 } 1478 }
1479 1479
1480 /* Unmask the event */ 1480 /* Unmask the event */
1481 if (eeh_enabled()) 1481 if (ret == EEH_NEXT_ERR_NONE && eeh_enabled())
1482 enable_irq(eeh_event_irq); 1482 enable_irq(eeh_event_irq);
1483 1483
1484 return ret; 1484 return ret;
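
The eeh-powernv.c hunk above narrows the unmask condition: the EEH event
interrupt is re-enabled only when the poll reported no further pending error.
Below is a minimal userspace sketch of that guard pattern; poll_next_error(),
NEXT_ERR_NONE, enable_source() and the two-error queue are invented for the
illustration and are not the kernel's API.

    #include <stdio.h>

    enum next_err { NEXT_ERR_NONE, NEXT_ERR_FROZEN, NEXT_ERR_FENCED };

    static int source_enabled;

    static void enable_source(void)  { source_enabled = 1; }
    static void disable_source(void) { source_enabled = 0; }

    /* Pretend two errors are queued, then the queue is empty. */
    static enum next_err poll_next_error(void)
    {
        static int pending = 2;
        return pending-- > 0 ? NEXT_ERR_FROZEN : NEXT_ERR_NONE;
    }

    static enum next_err handle_events(void)
    {
        enum next_err ret;

        disable_source();               /* mask while processing */
        ret = poll_next_error();
        /*
         * Unmask only when nothing else is queued; if another error is
         * pending, the caller will poll again before re-enabling.
         */
        if (ret == NEXT_ERR_NONE)
            enable_source();
        return ret;
    }

    int main(void)
    {
        while (handle_events() != NEXT_ERR_NONE)
            printf("error handled, source still masked (%d)\n", source_enabled);
        printf("queue empty, source re-enabled (%d)\n", source_enabled);
        return 0;
    }
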
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 5738d315248b..85cbc96eff6c 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -2220,7 +2220,7 @@ static void pnv_pci_ioda_setup_opal_tce_kill(struct pnv_phb *phb)
2220 2220
2221static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned shift, 2221static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned shift,
2222 unsigned levels, unsigned long limit, 2222 unsigned levels, unsigned long limit,
2223 unsigned long *current_offset) 2223 unsigned long *current_offset, unsigned long *total_allocated)
2224{ 2224{
2225 struct page *tce_mem = NULL; 2225 struct page *tce_mem = NULL;
2226 __be64 *addr, *tmp; 2226 __be64 *addr, *tmp;
@@ -2236,6 +2236,7 @@ static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned shift,
2236 } 2236 }
2237 addr = page_address(tce_mem); 2237 addr = page_address(tce_mem);
2238 memset(addr, 0, allocated); 2238 memset(addr, 0, allocated);
2239 *total_allocated += allocated;
2239 2240
2240 --levels; 2241 --levels;
2241 if (!levels) { 2242 if (!levels) {
@@ -2245,7 +2246,7 @@ static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned shift,
2245 2246
2246 for (i = 0; i < entries; ++i) { 2247 for (i = 0; i < entries; ++i) {
2247 tmp = pnv_pci_ioda2_table_do_alloc_pages(nid, shift, 2248 tmp = pnv_pci_ioda2_table_do_alloc_pages(nid, shift,
2248 levels, limit, current_offset); 2249 levels, limit, current_offset, total_allocated);
2249 if (!tmp) 2250 if (!tmp)
2250 break; 2251 break;
2251 2252
@@ -2267,7 +2268,7 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
2267 struct iommu_table *tbl) 2268 struct iommu_table *tbl)
2268{ 2269{
2269 void *addr; 2270 void *addr;
2270 unsigned long offset = 0, level_shift; 2271 unsigned long offset = 0, level_shift, total_allocated = 0;
2271 const unsigned window_shift = ilog2(window_size); 2272 const unsigned window_shift = ilog2(window_size);
2272 unsigned entries_shift = window_shift - page_shift; 2273 unsigned entries_shift = window_shift - page_shift;
2273 unsigned table_shift = max_t(unsigned, entries_shift + 3, PAGE_SHIFT); 2274 unsigned table_shift = max_t(unsigned, entries_shift + 3, PAGE_SHIFT);
@@ -2286,7 +2287,7 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
2286 2287
2287 /* Allocate TCE table */ 2288 /* Allocate TCE table */
2288 addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift, 2289 addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
2289 levels, tce_table_size, &offset); 2290 levels, tce_table_size, &offset, &total_allocated);
2290 2291
2291 /* addr==NULL means that the first level allocation failed */ 2292 /* addr==NULL means that the first level allocation failed */
2292 if (!addr) 2293 if (!addr)
@@ -2308,7 +2309,7 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
2308 page_shift); 2309 page_shift);
2309 tbl->it_level_size = 1ULL << (level_shift - 3); 2310 tbl->it_level_size = 1ULL << (level_shift - 3);
2310 tbl->it_indirect_levels = levels - 1; 2311 tbl->it_indirect_levels = levels - 1;
2311 tbl->it_allocated_size = offset; 2312 tbl->it_allocated_size = total_allocated;
2312 2313
2313 pr_devel("Created TCE table: ws=%08llx ts=%lx @%08llx\n", 2314 pr_devel("Created TCE table: ws=%08llx ts=%lx @%08llx\n",
2314 window_size, tce_table_size, bus_offset); 2315 window_size, tce_table_size, bus_offset);
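
The pci-ioda.c change threads a total_allocated out-parameter through the
recursive TCE table allocator so that it_allocated_size reports the bytes used
by every level of the table rather than only the running offset of the last
level. A compilable sketch of that accounting pattern follows; the sizes and
the names alloc_level(), free_level(), LEVEL_BYTES and ENTRIES are invented
for the example.

    #include <stdio.h>
    #include <stdlib.h>

    #define LEVEL_BYTES 4096UL   /* pretend every table level is one page */
    #define ENTRIES        4     /* children per intermediate level */

    /*
     * Allocate a 'levels'-deep tree and report, via *total, how many bytes
     * were handed out across all levels combined.
     */
    static void *alloc_level(unsigned levels, unsigned long *total)
    {
        void **node = calloc(1, LEVEL_BYTES);
        unsigned i;

        if (!node)
            return NULL;
        *total += LEVEL_BYTES;          /* count this level, too */

        if (levels > 1)
            for (i = 0; i < ENTRIES; i++)
                node[i] = alloc_level(levels - 1, total);
        return node;
    }

    static void free_level(void **node, unsigned levels)
    {
        unsigned i;

        if (!node)
            return;
        if (levels > 1)
            for (i = 0; i < ENTRIES; i++)
                free_level(node[i], levels - 1);
        free(node);
    }

    int main(void)
    {
        unsigned long total = 0;
        void **root = alloc_level(3, &total);

        /* 1 + 4 + 16 = 21 pages for a three-level tree */
        printf("allocated %lu bytes in total\n", total);
        free_level(root, 3);
        return 0;
    }
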
diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild
index dc5385ebb071..5ad26dd94d77 100644
--- a/arch/s390/include/asm/Kbuild
+++ b/arch/s390/include/asm/Kbuild
@@ -3,5 +3,6 @@
3generic-y += clkdev.h 3generic-y += clkdev.h
4generic-y += irq_work.h 4generic-y += irq_work.h
5generic-y += mcs_spinlock.h 5generic-y += mcs_spinlock.h
6generic-y += mm-arch-hooks.h
6generic-y += preempt.h 7generic-y += preempt.h
7generic-y += trace_clock.h 8generic-y += trace_clock.h
diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h
index cfad7fca01d6..d7697ab802f6 100644
--- a/arch/s390/include/asm/ctl_reg.h
+++ b/arch/s390/include/asm/ctl_reg.h
@@ -57,7 +57,10 @@ union ctlreg0 {
57 unsigned long lap : 1; /* Low-address-protection control */ 57 unsigned long lap : 1; /* Low-address-protection control */
58 unsigned long : 4; 58 unsigned long : 4;
59 unsigned long edat : 1; /* Enhanced-DAT-enablement control */ 59 unsigned long edat : 1; /* Enhanced-DAT-enablement control */
60 unsigned long : 23; 60 unsigned long : 4;
61 unsigned long afp : 1; /* AFP-register control */
62 unsigned long vx : 1; /* Vector enablement control */
63 unsigned long : 17;
61 }; 64 };
62}; 65};
63 66
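
The ctl_reg.h hunk gives names to two previously reserved bits of control
register 0 (the AFP-register and vector-enablement controls) so later code can
set them by name. The underlying idiom is a union of the raw register value
with a bit-field struct. A small stand-alone illustration is below; the layout
is invented, not s390's, and bit-field ordering is compiler-defined, so only
the idiom carries over.

    #include <stdio.h>

    /* Toy register: a raw 32-bit value overlaid with named bits. */
    union toy_reg {
        unsigned int val;
        struct {
            unsigned int     : 13;   /* reserved */
            unsigned int afp : 1;    /* floating-point control */
            unsigned int vx  : 1;    /* vector enablement */
            unsigned int     : 17;   /* reserved */
        };
    };

    int main(void)
    {
        union toy_reg r = { .val = 0 };

        r.afp = 1;
        r.vx = 1;
        printf("raw value with afp and vx set: %#x\n", r.val);
        return 0;
    }
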
diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h
index 0130d0379edd..d9be7c0c1291 100644
--- a/arch/s390/include/asm/hugetlb.h
+++ b/arch/s390/include/asm/hugetlb.h
@@ -14,6 +14,7 @@
14 14
15#define is_hugepage_only_range(mm, addr, len) 0 15#define is_hugepage_only_range(mm, addr, len) 0
16#define hugetlb_free_pgd_range free_pgd_range 16#define hugetlb_free_pgd_range free_pgd_range
17#define hugepages_supported() (MACHINE_HAS_HPAGE)
17 18
18void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, 19void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
19 pte_t *ptep, pte_t pte); 20 pte_t *ptep, pte_t pte);
diff --git a/arch/s390/include/asm/mm-arch-hooks.h b/arch/s390/include/asm/mm-arch-hooks.h
deleted file mode 100644
index 07680b2f3c59..000000000000
--- a/arch/s390/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * Architecture specific mm hooks
3 *
4 * Copyright (C) 2015, IBM Corporation
5 * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _ASM_S390_MM_ARCH_HOOKS_H
13#define _ASM_S390_MM_ARCH_HOOKS_H
14
15#endif /* _ASM_S390_MM_ARCH_HOOKS_H */
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index dd345238d9a7..53eacbd4f09b 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -17,10 +17,7 @@
17#define PAGE_DEFAULT_ACC 0 17#define PAGE_DEFAULT_ACC 0
18#define PAGE_DEFAULT_KEY (PAGE_DEFAULT_ACC << 4) 18#define PAGE_DEFAULT_KEY (PAGE_DEFAULT_ACC << 4)
19 19
20#include <asm/setup.h> 20#define HPAGE_SHIFT 20
21#ifndef __ASSEMBLY__
22
23extern int HPAGE_SHIFT;
24#define HPAGE_SIZE (1UL << HPAGE_SHIFT) 21#define HPAGE_SIZE (1UL << HPAGE_SHIFT)
25#define HPAGE_MASK (~(HPAGE_SIZE - 1)) 22#define HPAGE_MASK (~(HPAGE_SIZE - 1))
26#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) 23#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
@@ -30,6 +27,9 @@ extern int HPAGE_SHIFT;
30#define ARCH_HAS_PREPARE_HUGEPAGE 27#define ARCH_HAS_PREPARE_HUGEPAGE
31#define ARCH_HAS_HUGEPAGE_CLEAR_FLUSH 28#define ARCH_HAS_HUGEPAGE_CLEAR_FLUSH
32 29
30#include <asm/setup.h>
31#ifndef __ASSEMBLY__
32
33static inline void storage_key_init_range(unsigned long start, unsigned long end) 33static inline void storage_key_init_range(unsigned long start, unsigned long end)
34{ 34{
35#if PAGE_DEFAULT_KEY 35#if PAGE_DEFAULT_KEY
diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h
index 4cb19fe76dd9..f897ec73dc8c 100644
--- a/arch/s390/include/asm/perf_event.h
+++ b/arch/s390/include/asm/perf_event.h
@@ -87,7 +87,15 @@ struct sf_raw_sample {
87} __packed; 87} __packed;
88 88
89/* Perf hardware reserve and release functions */ 89/* Perf hardware reserve and release functions */
90#ifdef CONFIG_PERF_EVENTS
90int perf_reserve_sampling(void); 91int perf_reserve_sampling(void);
91void perf_release_sampling(void); 92void perf_release_sampling(void);
93#else /* CONFIG_PERF_EVENTS */
94static inline int perf_reserve_sampling(void)
95{
96 return 0;
97}
98static inline void perf_release_sampling(void) {}
99#endif /* CONFIG_PERF_EVENTS */
92 100
93#endif /* _ASM_S390_PERF_EVENT_H */ 101#endif /* _ASM_S390_PERF_EVENT_H */
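
The perf_event.h hunk applies the usual pattern for optional kernel features:
real prototypes when CONFIG_PERF_EVENTS is enabled, empty static inline stubs
when it is not, so callers (here the oprofile init code) build either way
without their own #ifdefs. A generic, compilable illustration of the pattern,
using a made-up HAVE_SAMPLING switch rather than the kernel's Kconfig symbol:

    #include <stdio.h>

    /* Build with -DHAVE_SAMPLING to get the real implementation. */
    #ifdef HAVE_SAMPLING
    int reserve_sampling(void)  { printf("sampling reserved\n"); return 0; }
    void release_sampling(void) { printf("sampling released\n"); }
    #else
    /* Feature disabled: stubs keep the callers free of #ifdefs. */
    static inline int reserve_sampling(void)  { return 0; }
    static inline void release_sampling(void) { }
    #endif

    int main(void)
    {
        if (reserve_sampling() == 0) {
            /* ... use the facility, or silently skip it ... */
            release_sampling();
        }
        return 0;
    }
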
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index c7d1b9d09011..a2da259d9327 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -23,15 +23,15 @@
23 23
24int main(void) 24int main(void)
25{ 25{
26 DEFINE(__THREAD_info, offsetof(struct task_struct, stack)); 26 DEFINE(__TASK_thread_info, offsetof(struct task_struct, stack));
27 DEFINE(__THREAD_ksp, offsetof(struct task_struct, thread.ksp)); 27 DEFINE(__TASK_thread, offsetof(struct task_struct, thread));
28 DEFINE(__THREAD_mm_segment, offsetof(struct task_struct, thread.mm_segment));
29 BLANK();
30 DEFINE(__TASK_pid, offsetof(struct task_struct, pid)); 28 DEFINE(__TASK_pid, offsetof(struct task_struct, pid));
31 BLANK(); 29 BLANK();
32 DEFINE(__THREAD_per_cause, offsetof(struct task_struct, thread.per_event.cause)); 30 DEFINE(__THREAD_ksp, offsetof(struct thread_struct, ksp));
33 DEFINE(__THREAD_per_address, offsetof(struct task_struct, thread.per_event.address)); 31 DEFINE(__THREAD_per_cause, offsetof(struct thread_struct, per_event.cause));
34 DEFINE(__THREAD_per_paid, offsetof(struct task_struct, thread.per_event.paid)); 32 DEFINE(__THREAD_per_address, offsetof(struct thread_struct, per_event.address));
33 DEFINE(__THREAD_per_paid, offsetof(struct thread_struct, per_event.paid));
34 DEFINE(__THREAD_trap_tdb, offsetof(struct thread_struct, trap_tdb));
35 BLANK(); 35 BLANK();
36 DEFINE(__TI_task, offsetof(struct thread_info, task)); 36 DEFINE(__TI_task, offsetof(struct thread_info, task));
37 DEFINE(__TI_flags, offsetof(struct thread_info, flags)); 37 DEFINE(__TI_flags, offsetof(struct thread_info, flags));
@@ -176,7 +176,6 @@ int main(void)
176 DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data)); 176 DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data));
177 DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap)); 177 DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap));
178 DEFINE(__LC_PGM_TDB, offsetof(struct _lowcore, pgm_tdb)); 178 DEFINE(__LC_PGM_TDB, offsetof(struct _lowcore, pgm_tdb));
179 DEFINE(__THREAD_trap_tdb, offsetof(struct task_struct, thread.trap_tdb));
180 DEFINE(__GMAP_ASCE, offsetof(struct gmap, asce)); 179 DEFINE(__GMAP_ASCE, offsetof(struct gmap, asce));
181 DEFINE(__SIE_PROG0C, offsetof(struct kvm_s390_sie_block, prog0c)); 180 DEFINE(__SIE_PROG0C, offsetof(struct kvm_s390_sie_block, prog0c));
182 DEFINE(__SIE_PROG20, offsetof(struct kvm_s390_sie_block, prog20)); 181 DEFINE(__SIE_PROG20, offsetof(struct kvm_s390_sie_block, prog20));
diff --git a/arch/s390/kernel/cache.c b/arch/s390/kernel/cache.c
index bff5e3b6d822..8ba32436effe 100644
--- a/arch/s390/kernel/cache.c
+++ b/arch/s390/kernel/cache.c
@@ -138,6 +138,8 @@ int init_cache_level(unsigned int cpu)
138 union cache_topology ct; 138 union cache_topology ct;
139 enum cache_type ctype; 139 enum cache_type ctype;
140 140
141 if (!test_facility(34))
142 return -EOPNOTSUPP;
141 if (!this_cpu_ci) 143 if (!this_cpu_ci)
142 return -EINVAL; 144 return -EINVAL;
143 ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0); 145 ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 3238893c9d4f..84062e7a77da 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -178,17 +178,21 @@ _PIF_WORK = (_PIF_PER_TRAP)
178 */ 178 */
179ENTRY(__switch_to) 179ENTRY(__switch_to)
180 stmg %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task 180 stmg %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task
181 stg %r15,__THREAD_ksp(%r2) # store kernel stack of prev 181 lgr %r1,%r2
182 lg %r4,__THREAD_info(%r2) # get thread_info of prev 182 aghi %r1,__TASK_thread # thread_struct of prev task
183 lg %r5,__THREAD_info(%r3) # get thread_info of next 183 lg %r4,__TASK_thread_info(%r2) # get thread_info of prev
184 lg %r5,__TASK_thread_info(%r3) # get thread_info of next
185 stg %r15,__THREAD_ksp(%r1) # store kernel stack of prev
186 lgr %r1,%r3
187 aghi %r1,__TASK_thread # thread_struct of next task
184 lgr %r15,%r5 188 lgr %r15,%r5
185 aghi %r15,STACK_INIT # end of kernel stack of next 189 aghi %r15,STACK_INIT # end of kernel stack of next
186 stg %r3,__LC_CURRENT # store task struct of next 190 stg %r3,__LC_CURRENT # store task struct of next
187 stg %r5,__LC_THREAD_INFO # store thread info of next 191 stg %r5,__LC_THREAD_INFO # store thread info of next
188 stg %r15,__LC_KERNEL_STACK # store end of kernel stack 192 stg %r15,__LC_KERNEL_STACK # store end of kernel stack
193 lg %r15,__THREAD_ksp(%r1) # load kernel stack of next
189 lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4 194 lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
190 mvc __LC_CURRENT_PID+4(4,%r0),__TASK_pid(%r3) # store pid of next 195 mvc __LC_CURRENT_PID+4(4,%r0),__TASK_pid(%r3) # store pid of next
191 lg %r15,__THREAD_ksp(%r3) # load kernel stack of next
192 lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task 196 lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
193 br %r14 197 br %r14
194 198
@@ -417,6 +421,7 @@ ENTRY(pgm_check_handler)
417 LAST_BREAK %r14 421 LAST_BREAK %r14
418 lg %r15,__LC_KERNEL_STACK 422 lg %r15,__LC_KERNEL_STACK
419 lg %r14,__TI_task(%r12) 423 lg %r14,__TI_task(%r12)
424 aghi %r14,__TASK_thread # pointer to thread_struct
420 lghi %r13,__LC_PGM_TDB 425 lghi %r13,__LC_PGM_TDB
421 tm __LC_PGM_ILC+2,0x02 # check for transaction abort 426 tm __LC_PGM_ILC+2,0x02 # check for transaction abort
422 jz 2f 427 jz 2f
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 505c17c0ae1a..56b550893593 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -21,6 +21,7 @@
21#include <asm/nmi.h> 21#include <asm/nmi.h>
22#include <asm/crw.h> 22#include <asm/crw.h>
23#include <asm/switch_to.h> 23#include <asm/switch_to.h>
24#include <asm/ctl_reg.h>
24 25
25struct mcck_struct { 26struct mcck_struct {
26 int kill_task; 27 int kill_task;
@@ -129,26 +130,30 @@ static int notrace s390_revalidate_registers(struct mci *mci)
129 } else 130 } else
130 asm volatile("lfpc 0(%0)" : : "a" (fpt_creg_save_area)); 131 asm volatile("lfpc 0(%0)" : : "a" (fpt_creg_save_area));
131 132
132 asm volatile( 133 if (!MACHINE_HAS_VX) {
133 " ld 0,0(%0)\n" 134 /* Revalidate floating point registers */
134 " ld 1,8(%0)\n" 135 asm volatile(
135 " ld 2,16(%0)\n" 136 " ld 0,0(%0)\n"
136 " ld 3,24(%0)\n" 137 " ld 1,8(%0)\n"
137 " ld 4,32(%0)\n" 138 " ld 2,16(%0)\n"
138 " ld 5,40(%0)\n" 139 " ld 3,24(%0)\n"
139 " ld 6,48(%0)\n" 140 " ld 4,32(%0)\n"
140 " ld 7,56(%0)\n" 141 " ld 5,40(%0)\n"
141 " ld 8,64(%0)\n" 142 " ld 6,48(%0)\n"
142 " ld 9,72(%0)\n" 143 " ld 7,56(%0)\n"
143 " ld 10,80(%0)\n" 144 " ld 8,64(%0)\n"
144 " ld 11,88(%0)\n" 145 " ld 9,72(%0)\n"
145 " ld 12,96(%0)\n" 146 " ld 10,80(%0)\n"
146 " ld 13,104(%0)\n" 147 " ld 11,88(%0)\n"
147 " ld 14,112(%0)\n" 148 " ld 12,96(%0)\n"
148 " ld 15,120(%0)\n" 149 " ld 13,104(%0)\n"
149 : : "a" (fpt_save_area)); 150 " ld 14,112(%0)\n"
150 /* Revalidate vector registers */ 151 " ld 15,120(%0)\n"
151 if (MACHINE_HAS_VX && current->thread.vxrs) { 152 : : "a" (fpt_save_area));
153 } else {
154 /* Revalidate vector registers */
155 union ctlreg0 cr0;
156
152 if (!mci->vr) { 157 if (!mci->vr) {
153 /* 158 /*
154 * Vector registers can't be restored and therefore 159 * Vector registers can't be restored and therefore
@@ -156,8 +161,12 @@ static int notrace s390_revalidate_registers(struct mci *mci)
156 */ 161 */
157 kill_task = 1; 162 kill_task = 1;
158 } 163 }
164 cr0.val = S390_lowcore.cregs_save_area[0];
165 cr0.afp = cr0.vx = 1;
166 __ctl_load(cr0.val, 0, 0);
159 restore_vx_regs((__vector128 *) 167 restore_vx_regs((__vector128 *)
160 S390_lowcore.vector_save_area_addr); 168 &S390_lowcore.vector_save_area);
169 __ctl_load(S390_lowcore.cregs_save_area[0], 0, 0);
161 } 170 }
162 /* Revalidate access registers */ 171 /* Revalidate access registers */
163 asm volatile( 172 asm volatile(
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index dc5edc29b73a..8f587d871b9f 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -163,7 +163,7 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
163asmlinkage void execve_tail(void) 163asmlinkage void execve_tail(void)
164{ 164{
165 current->thread.fp_regs.fpc = 0; 165 current->thread.fp_regs.fpc = 0;
166 asm volatile("sfpc %0,%0" : : "d" (0)); 166 asm volatile("sfpc %0" : : "d" (0));
167} 167}
168 168
169/* 169/*
diff --git a/arch/s390/kernel/sclp.S b/arch/s390/kernel/sclp.S
index 43c3169ea49c..ada0c07fe1a8 100644
--- a/arch/s390/kernel/sclp.S
+++ b/arch/s390/kernel/sclp.S
@@ -270,6 +270,8 @@ ENTRY(_sclp_print_early)
270 jno .Lesa2 270 jno .Lesa2
271 ahi %r15,-80 271 ahi %r15,-80
272 stmh %r6,%r15,96(%r15) # store upper register halves 272 stmh %r6,%r15,96(%r15) # store upper register halves
273 basr %r13,0
274 lmh %r0,%r15,.Lzeroes-.(%r13) # clear upper register halves
273.Lesa2: 275.Lesa2:
274 lr %r10,%r2 # save string pointer 276 lr %r10,%r2 # save string pointer
275 lhi %r2,0 277 lhi %r2,0
@@ -291,6 +293,8 @@ ENTRY(_sclp_print_early)
291.Lesa3: 293.Lesa3:
292 lm %r6,%r15,120(%r15) # restore registers 294 lm %r6,%r15,120(%r15) # restore registers
293 br %r14 295 br %r14
296.Lzeroes:
297 .fill 64,4,0
294 298
295.LwritedataS4: 299.LwritedataS4:
296 .long 0x00760005 # SCLP command for write data 300 .long 0x00760005 # SCLP command for write data
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index f7f027caaaaa..ca070d260af2 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -885,8 +885,6 @@ void __init setup_arch(char **cmdline_p)
885 */ 885 */
886 setup_hwcaps(); 886 setup_hwcaps();
887 887
888 HPAGE_SHIFT = MACHINE_HAS_HPAGE ? 20 : 0;
889
890 /* 888 /*
891 * Create kernel page tables and switch to virtual addressing. 889 * Create kernel page tables and switch to virtual addressing.
892 */ 890 */
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 4d96c9f53455..7bea81d8a363 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -259,7 +259,7 @@ void vector_exception(struct pt_regs *regs)
259 } 259 }
260 260
261 /* get vector interrupt code from fpc */ 261 /* get vector interrupt code from fpc */
262 asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc)); 262 asm volatile("stfpc %0" : "=Q" (current->thread.fp_regs.fpc));
263 vic = (current->thread.fp_regs.fpc & 0xf00) >> 8; 263 vic = (current->thread.fp_regs.fpc & 0xf00) >> 8;
264 switch (vic) { 264 switch (vic) {
265 case 1: /* invalid vector operation */ 265 case 1: /* invalid vector operation */
@@ -297,7 +297,7 @@ void data_exception(struct pt_regs *regs)
297 297
298 location = get_trap_ip(regs); 298 location = get_trap_ip(regs);
299 299
300 asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc)); 300 asm volatile("stfpc %0" : "=Q" (current->thread.fp_regs.fpc));
301 /* Check for vector register enablement */ 301 /* Check for vector register enablement */
302 if (MACHINE_HAS_VX && !current->thread.vxrs && 302 if (MACHINE_HAS_VX && !current->thread.vxrs &&
303 (current->thread.fp_regs.fpc & FPC_DXC_MASK) == 0xfe00) { 303 (current->thread.fp_regs.fpc & FPC_DXC_MASK) == 0xfe00) {
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 2078f92d15ac..f32f843a3631 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -1742,10 +1742,10 @@ static bool ibs_enabled(struct kvm_vcpu *vcpu)
1742 1742
1743static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu) 1743static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
1744{ 1744{
1745 if (!vcpu->requests)
1746 return 0;
1747retry: 1745retry:
1748 kvm_s390_vcpu_request_handled(vcpu); 1746 kvm_s390_vcpu_request_handled(vcpu);
1747 if (!vcpu->requests)
1748 return 0;
1749 /* 1749 /*
1750 * We use MMU_RELOAD just to re-arm the ipte notifier for the 1750 * We use MMU_RELOAD just to re-arm the ipte notifier for the
1751 * guest prefix page. gmap_ipte_notify will wait on the ptl lock. 1751 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 33082d0d101b..b33f66110ca9 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -31,8 +31,6 @@
31#define ALLOC_ORDER 2 31#define ALLOC_ORDER 2
32#define FRAG_MASK 0x03 32#define FRAG_MASK 0x03
33 33
34int HPAGE_SHIFT;
35
36unsigned long *crst_table_alloc(struct mm_struct *mm) 34unsigned long *crst_table_alloc(struct mm_struct *mm)
37{ 35{
38 struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER); 36 struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index fee782acc2ee..8d2e5165865f 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -448,13 +448,13 @@ static void bpf_jit_prologue(struct bpf_jit *jit)
448 EMIT6_DISP_LH(0xe3000000, 0x0004, REG_SKB_DATA, REG_0, 448 EMIT6_DISP_LH(0xe3000000, 0x0004, REG_SKB_DATA, REG_0,
449 BPF_REG_1, offsetof(struct sk_buff, data)); 449 BPF_REG_1, offsetof(struct sk_buff, data));
450 } 450 }
451 /* BPF compatibility: clear A (%b7) and X (%b8) registers */ 451 /* BPF compatibility: clear A (%b0) and X (%b7) registers */
452 if (REG_SEEN(BPF_REG_7)) 452 if (REG_SEEN(BPF_REG_A))
453 /* lghi %b7,0 */ 453 /* lghi %ba,0 */
454 EMIT4_IMM(0xa7090000, BPF_REG_7, 0); 454 EMIT4_IMM(0xa7090000, BPF_REG_A, 0);
455 if (REG_SEEN(BPF_REG_8)) 455 if (REG_SEEN(BPF_REG_X))
456 /* lghi %b8,0 */ 456 /* lghi %bx,0 */
457 EMIT4_IMM(0xa7090000, BPF_REG_8, 0); 457 EMIT4_IMM(0xa7090000, BPF_REG_X, 0);
458} 458}
459 459
460/* 460/*
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c
index bc927a09a172..9cfa2ffaa9d6 100644
--- a/arch/s390/oprofile/init.c
+++ b/arch/s390/oprofile/init.c
@@ -16,6 +16,7 @@
16#include <linux/fs.h> 16#include <linux/fs.h>
17#include <linux/module.h> 17#include <linux/module.h>
18#include <asm/processor.h> 18#include <asm/processor.h>
19#include <asm/perf_event.h>
19 20
20#include "../../../drivers/oprofile/oprof.h" 21#include "../../../drivers/oprofile/oprof.h"
21 22
diff --git a/arch/score/include/asm/Kbuild b/arch/score/include/asm/Kbuild
index 138fb3db45ba..92ffe397b893 100644
--- a/arch/score/include/asm/Kbuild
+++ b/arch/score/include/asm/Kbuild
@@ -7,6 +7,7 @@ generic-y += clkdev.h
7generic-y += cputime.h 7generic-y += cputime.h
8generic-y += irq_work.h 8generic-y += irq_work.h
9generic-y += mcs_spinlock.h 9generic-y += mcs_spinlock.h
10generic-y += mm-arch-hooks.h
10generic-y += preempt.h 11generic-y += preempt.h
11generic-y += sections.h 12generic-y += sections.h
12generic-y += trace_clock.h 13generic-y += trace_clock.h
diff --git a/arch/score/include/asm/mm-arch-hooks.h b/arch/score/include/asm/mm-arch-hooks.h
deleted file mode 100644
index 5e38689f189a..000000000000
--- a/arch/score/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * Architecture specific mm hooks
3 *
4 * Copyright (C) 2015, IBM Corporation
5 * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _ASM_SCORE_MM_ARCH_HOOKS_H
13#define _ASM_SCORE_MM_ARCH_HOOKS_H
14
15#endif /* _ASM_SCORE_MM_ARCH_HOOKS_H */
diff --git a/arch/sh/include/asm/Kbuild b/arch/sh/include/asm/Kbuild
index 9ac4626e7284..aac452b26aa8 100644
--- a/arch/sh/include/asm/Kbuild
+++ b/arch/sh/include/asm/Kbuild
@@ -16,6 +16,7 @@ generic-y += kvm_para.h
16generic-y += local.h 16generic-y += local.h
17generic-y += local64.h 17generic-y += local64.h
18generic-y += mcs_spinlock.h 18generic-y += mcs_spinlock.h
19generic-y += mm-arch-hooks.h
19generic-y += mman.h 20generic-y += mman.h
20generic-y += msgbuf.h 21generic-y += msgbuf.h
21generic-y += param.h 22generic-y += param.h
diff --git a/arch/sh/include/asm/mm-arch-hooks.h b/arch/sh/include/asm/mm-arch-hooks.h
deleted file mode 100644
index 18087298b728..000000000000
--- a/arch/sh/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * Architecture specific mm hooks
3 *
4 * Copyright (C) 2015, IBM Corporation
5 * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _ASM_SH_MM_ARCH_HOOKS_H
13#define _ASM_SH_MM_ARCH_HOOKS_H
14
15#endif /* _ASM_SH_MM_ARCH_HOOKS_H */
diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild
index 2b2a69dcc467..e928618838bc 100644
--- a/arch/sparc/include/asm/Kbuild
+++ b/arch/sparc/include/asm/Kbuild
@@ -12,6 +12,7 @@ generic-y += linkage.h
12generic-y += local.h 12generic-y += local.h
13generic-y += local64.h 13generic-y += local64.h
14generic-y += mcs_spinlock.h 14generic-y += mcs_spinlock.h
15generic-y += mm-arch-hooks.h
15generic-y += module.h 16generic-y += module.h
16generic-y += mutex.h 17generic-y += mutex.h
17generic-y += preempt.h 18generic-y += preempt.h
diff --git a/arch/sparc/include/asm/mm-arch-hooks.h b/arch/sparc/include/asm/mm-arch-hooks.h
deleted file mode 100644
index b89ba44c16f1..000000000000
--- a/arch/sparc/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * Architecture specific mm hooks
3 *
4 * Copyright (C) 2015, IBM Corporation
5 * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _ASM_SPARC_MM_ARCH_HOOKS_H
13#define _ASM_SPARC_MM_ARCH_HOOKS_H
14
15#endif /* _ASM_SPARC_MM_ARCH_HOOKS_H */
diff --git a/arch/sparc/include/asm/visasm.h b/arch/sparc/include/asm/visasm.h
index 1f0aa2024e94..6424249d5f78 100644
--- a/arch/sparc/include/asm/visasm.h
+++ b/arch/sparc/include/asm/visasm.h
@@ -28,16 +28,10 @@
28 * Must preserve %o5 between VISEntryHalf and VISExitHalf */ 28 * Must preserve %o5 between VISEntryHalf and VISExitHalf */
29 29
30#define VISEntryHalf \ 30#define VISEntryHalf \
31 rd %fprs, %o5; \ 31 VISEntry
32 andcc %o5, FPRS_FEF, %g0; \ 32
33 be,pt %icc, 297f; \ 33#define VISExitHalf \
34 sethi %hi(298f), %g7; \ 34 VISExit
35 sethi %hi(VISenterhalf), %g1; \
36 jmpl %g1 + %lo(VISenterhalf), %g0; \
37 or %g7, %lo(298f), %g7; \
38 clr %o5; \
39297: wr %o5, FPRS_FEF, %fprs; \
40298:
41 35
42#define VISEntryHalfFast(fail_label) \ 36#define VISEntryHalfFast(fail_label) \
43 rd %fprs, %o5; \ 37 rd %fprs, %o5; \
@@ -47,7 +41,7 @@
47 ba,a,pt %xcc, fail_label; \ 41 ba,a,pt %xcc, fail_label; \
48297: wr %o5, FPRS_FEF, %fprs; 42297: wr %o5, FPRS_FEF, %fprs;
49 43
50#define VISExitHalf \ 44#define VISExitHalfFast \
51 wr %o5, 0, %fprs; 45 wr %o5, 0, %fprs;
52 46
53#ifndef __ASSEMBLY__ 47#ifndef __ASSEMBLY__
diff --git a/arch/sparc/lib/NG4memcpy.S b/arch/sparc/lib/NG4memcpy.S
index 140527a20e7d..83aeeb1dffdb 100644
--- a/arch/sparc/lib/NG4memcpy.S
+++ b/arch/sparc/lib/NG4memcpy.S
@@ -240,8 +240,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
240 add %o0, 0x40, %o0 240 add %o0, 0x40, %o0
241 bne,pt %icc, 1b 241 bne,pt %icc, 1b
242 LOAD(prefetch, %g1 + 0x200, #n_reads_strong) 242 LOAD(prefetch, %g1 + 0x200, #n_reads_strong)
243#ifdef NON_USER_COPY
244 VISExitHalfFast
245#else
243 VISExitHalf 246 VISExitHalf
244 247#endif
245 brz,pn %o2, .Lexit 248 brz,pn %o2, .Lexit
246 cmp %o2, 19 249 cmp %o2, 19
247 ble,pn %icc, .Lsmall_unaligned 250 ble,pn %icc, .Lsmall_unaligned
diff --git a/arch/sparc/lib/VISsave.S b/arch/sparc/lib/VISsave.S
index b320ae9e2e2e..a063d84336d6 100644
--- a/arch/sparc/lib/VISsave.S
+++ b/arch/sparc/lib/VISsave.S
@@ -44,9 +44,8 @@ vis1: ldub [%g6 + TI_FPSAVED], %g3
44 44
45 stx %g3, [%g6 + TI_GSR] 45 stx %g3, [%g6 + TI_GSR]
462: add %g6, %g1, %g3 462: add %g6, %g1, %g3
47 cmp %o5, FPRS_DU 47 mov FPRS_DU | FPRS_DL | FPRS_FEF, %o5
48 be,pn %icc, 6f 48 sll %g1, 3, %g1
49 sll %g1, 3, %g1
50 stb %o5, [%g3 + TI_FPSAVED] 49 stb %o5, [%g3 + TI_FPSAVED]
51 rd %gsr, %g2 50 rd %gsr, %g2
52 add %g6, %g1, %g3 51 add %g6, %g1, %g3
@@ -80,65 +79,3 @@ vis1: ldub [%g6 + TI_FPSAVED], %g3
80 .align 32 79 .align 32
8180: jmpl %g7 + %g0, %g0 8080: jmpl %g7 + %g0, %g0
82 nop 81 nop
83
846: ldub [%g3 + TI_FPSAVED], %o5
85 or %o5, FPRS_DU, %o5
86 add %g6, TI_FPREGS+0x80, %g2
87 stb %o5, [%g3 + TI_FPSAVED]
88
89 sll %g1, 5, %g1
90 add %g6, TI_FPREGS+0xc0, %g3
91 wr %g0, FPRS_FEF, %fprs
92 membar #Sync
93 stda %f32, [%g2 + %g1] ASI_BLK_P
94 stda %f48, [%g3 + %g1] ASI_BLK_P
95 membar #Sync
96 ba,pt %xcc, 80f
97 nop
98
99 .align 32
10080: jmpl %g7 + %g0, %g0
101 nop
102
103 .align 32
104VISenterhalf:
105 ldub [%g6 + TI_FPDEPTH], %g1
106 brnz,a,pn %g1, 1f
107 cmp %g1, 1
108 stb %g0, [%g6 + TI_FPSAVED]
109 stx %fsr, [%g6 + TI_XFSR]
110 clr %o5
111 jmpl %g7 + %g0, %g0
112 wr %g0, FPRS_FEF, %fprs
113
1141: bne,pn %icc, 2f
115 srl %g1, 1, %g1
116 ba,pt %xcc, vis1
117 sub %g7, 8, %g7
1182: addcc %g6, %g1, %g3
119 sll %g1, 3, %g1
120 andn %o5, FPRS_DU, %g2
121 stb %g2, [%g3 + TI_FPSAVED]
122
123 rd %gsr, %g2
124 add %g6, %g1, %g3
125 stx %g2, [%g3 + TI_GSR]
126 add %g6, %g1, %g2
127 stx %fsr, [%g2 + TI_XFSR]
128 sll %g1, 5, %g1
1293: andcc %o5, FPRS_DL, %g0
130 be,pn %icc, 4f
131 add %g6, TI_FPREGS, %g2
132
133 add %g6, TI_FPREGS+0x40, %g3
134 membar #Sync
135 stda %f0, [%g2 + %g1] ASI_BLK_P
136 stda %f16, [%g3 + %g1] ASI_BLK_P
137 membar #Sync
138 ba,pt %xcc, 4f
139 nop
140
141 .align 32
1424: and %o5, FPRS_DU, %o5
143 jmpl %g7 + %g0, %g0
144 wr %o5, FPRS_FEF, %fprs
diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
index 1d649a95660c..8069ce12f20b 100644
--- a/arch/sparc/lib/ksyms.c
+++ b/arch/sparc/lib/ksyms.c
@@ -135,10 +135,6 @@ EXPORT_SYMBOL(copy_user_page);
135void VISenter(void); 135void VISenter(void);
136EXPORT_SYMBOL(VISenter); 136EXPORT_SYMBOL(VISenter);
137 137
138/* CRYPTO code needs this */
139void VISenterhalf(void);
140EXPORT_SYMBOL(VISenterhalf);
141
142extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *); 138extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *);
143extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *, 139extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *,
144 unsigned long *); 140 unsigned long *);
diff --git a/arch/tile/include/asm/Kbuild b/arch/tile/include/asm/Kbuild
index d53654488c2c..d8a843163471 100644
--- a/arch/tile/include/asm/Kbuild
+++ b/arch/tile/include/asm/Kbuild
@@ -19,6 +19,7 @@ generic-y += irq_regs.h
19generic-y += local.h 19generic-y += local.h
20generic-y += local64.h 20generic-y += local64.h
21generic-y += mcs_spinlock.h 21generic-y += mcs_spinlock.h
22generic-y += mm-arch-hooks.h
22generic-y += msgbuf.h 23generic-y += msgbuf.h
23generic-y += mutex.h 24generic-y += mutex.h
24generic-y += param.h 25generic-y += param.h
diff --git a/arch/tile/include/asm/mm-arch-hooks.h b/arch/tile/include/asm/mm-arch-hooks.h
deleted file mode 100644
index d1709ea774f7..000000000000
--- a/arch/tile/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * Architecture specific mm hooks
3 *
4 * Copyright (C) 2015, IBM Corporation
5 * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _ASM_TILE_MM_ARCH_HOOKS_H
13#define _ASM_TILE_MM_ARCH_HOOKS_H
14
15#endif /* _ASM_TILE_MM_ARCH_HOOKS_H */
diff --git a/arch/tile/kernel/compat_signal.c b/arch/tile/kernel/compat_signal.c
index e8c2c04143cd..c667e104a0c2 100644
--- a/arch/tile/kernel/compat_signal.c
+++ b/arch/tile/kernel/compat_signal.c
@@ -113,8 +113,6 @@ int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
113 if (!access_ok(VERIFY_READ, from, sizeof(struct compat_siginfo))) 113 if (!access_ok(VERIFY_READ, from, sizeof(struct compat_siginfo)))
114 return -EFAULT; 114 return -EFAULT;
115 115
116 memset(to, 0, sizeof(*to));
117
118 err = __get_user(to->si_signo, &from->si_signo); 116 err = __get_user(to->si_signo, &from->si_signo);
119 err |= __get_user(to->si_errno, &from->si_errno); 117 err |= __get_user(to->si_errno, &from->si_errno);
120 err |= __get_user(to->si_code, &from->si_code); 118 err |= __get_user(to->si_code, &from->si_code);
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index 99c9ff87e018..6b755d125783 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -1139,7 +1139,7 @@ static void __init load_hv_initrd(void)
1139 1139
1140void __init free_initrd_mem(unsigned long begin, unsigned long end) 1140void __init free_initrd_mem(unsigned long begin, unsigned long end)
1141{ 1141{
1142 free_bootmem(__pa(begin), end - begin); 1142 free_bootmem_late(__pa(begin), end - begin);
1143} 1143}
1144 1144
1145static int __init setup_initrd(char *str) 1145static int __init setup_initrd(char *str)
diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild
index 3d63ff6f583f..149ec55f9c46 100644
--- a/arch/um/include/asm/Kbuild
+++ b/arch/um/include/asm/Kbuild
@@ -16,6 +16,7 @@ generic-y += irq_regs.h
16generic-y += irq_work.h 16generic-y += irq_work.h
17generic-y += kdebug.h 17generic-y += kdebug.h
18generic-y += mcs_spinlock.h 18generic-y += mcs_spinlock.h
19generic-y += mm-arch-hooks.h
19generic-y += mutex.h 20generic-y += mutex.h
20generic-y += param.h 21generic-y += param.h
21generic-y += pci.h 22generic-y += pci.h
diff --git a/arch/um/include/asm/mm-arch-hooks.h b/arch/um/include/asm/mm-arch-hooks.h
deleted file mode 100644
index a7c8b0dfdd4e..000000000000
--- a/arch/um/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * Architecture specific mm hooks
3 *
4 * Copyright (C) 2015, IBM Corporation
5 * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _ASM_UM_MM_ARCH_HOOKS_H
13#define _ASM_UM_MM_ARCH_HOOKS_H
14
15#endif /* _ASM_UM_MM_ARCH_HOOKS_H */
diff --git a/arch/unicore32/include/asm/Kbuild b/arch/unicore32/include/asm/Kbuild
index d12b377b5a8b..1fc7a286dc6f 100644
--- a/arch/unicore32/include/asm/Kbuild
+++ b/arch/unicore32/include/asm/Kbuild
@@ -26,6 +26,7 @@ generic-y += kdebug.h
26generic-y += kmap_types.h 26generic-y += kmap_types.h
27generic-y += local.h 27generic-y += local.h
28generic-y += mcs_spinlock.h 28generic-y += mcs_spinlock.h
29generic-y += mm-arch-hooks.h
29generic-y += mman.h 30generic-y += mman.h
30generic-y += module.h 31generic-y += module.h
31generic-y += msgbuf.h 32generic-y += msgbuf.h
diff --git a/arch/unicore32/include/asm/mm-arch-hooks.h b/arch/unicore32/include/asm/mm-arch-hooks.h
deleted file mode 100644
index 4d79a850c509..000000000000
--- a/arch/unicore32/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * Architecture specific mm hooks
3 *
4 * Copyright (C) 2015, IBM Corporation
5 * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _ASM_UNICORE32_MM_ARCH_HOOKS_H
13#define _ASM_UNICORE32_MM_ARCH_HOOKS_H
14
15#endif /* _ASM_UNICORE32_MM_ARCH_HOOKS_H */
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 3dbb7e7909ca..b3a1a5d77d92 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -41,6 +41,7 @@ config X86
41 select ARCH_USE_CMPXCHG_LOCKREF if X86_64 41 select ARCH_USE_CMPXCHG_LOCKREF if X86_64
42 select ARCH_USE_QUEUED_RWLOCKS 42 select ARCH_USE_QUEUED_RWLOCKS
43 select ARCH_USE_QUEUED_SPINLOCKS 43 select ARCH_USE_QUEUED_SPINLOCKS
44 select ARCH_WANTS_DYNAMIC_TASK_STRUCT
44 select ARCH_WANT_FRAME_POINTERS 45 select ARCH_WANT_FRAME_POINTERS
45 select ARCH_WANT_IPC_PARSE_VERSION if X86_32 46 select ARCH_WANT_IPC_PARSE_VERSION if X86_32
46 select ARCH_WANT_OPTIONAL_GPIOLIB 47 select ARCH_WANT_OPTIONAL_GPIOLIB
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index a15893d17c55..d8c0d3266173 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -297,6 +297,18 @@ config OPTIMIZE_INLINING
297 297
298 If unsure, say N. 298 If unsure, say N.
299 299
300config DEBUG_ENTRY
301 bool "Debug low-level entry code"
302 depends on DEBUG_KERNEL
303 ---help---
304 This option enables sanity checks in x86's low-level entry code.
305 Some of these sanity checks may slow down kernel entries and
306 exits or otherwise impact performance.
307
308 This is currently used to help test NMI code.
309
310 If unsure, say N.
311
300config DEBUG_NMI_SELFTEST 312config DEBUG_NMI_SELFTEST
301 bool "NMI Selftest" 313 bool "NMI Selftest"
302 depends on DEBUG_KERNEL && X86_LOCAL_APIC 314 depends on DEBUG_KERNEL && X86_LOCAL_APIC
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index 2c82bd150d43..7d69afd8b6fa 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -1193,6 +1193,10 @@ static efi_status_t setup_e820(struct boot_params *params,
1193 unsigned int e820_type = 0; 1193 unsigned int e820_type = 0;
1194 unsigned long m = efi->efi_memmap; 1194 unsigned long m = efi->efi_memmap;
1195 1195
1196#ifdef CONFIG_X86_64
1197 m |= (u64)efi->efi_memmap_hi << 32;
1198#endif
1199
1196 d = (efi_memory_desc_t *)(m + (i * efi->efi_memdesc_size)); 1200 d = (efi_memory_desc_t *)(m + (i * efi->efi_memdesc_size));
1197 switch (d->type) { 1201 switch (d->type) {
1198 case EFI_RESERVED_TYPE: 1202 case EFI_RESERVED_TYPE:
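
The eboot.c fix folds the high 32 bits of the EFI memory-map address into the
pointer before it is dereferenced; previously only the low half was used on
64-bit. The arithmetic is a plain shift-and-or of two 32-bit halves, shown in
the sketch below with invented field names (lo/hi) rather than the EFI struct.

    #include <stdio.h>
    #include <stdint.h>

    /* A 64-bit address carried as two 32-bit halves, as in boot ABIs. */
    struct split_addr {
        uint32_t lo;
        uint32_t hi;
    };

    static uint64_t join_addr(const struct split_addr *s)
    {
        uint64_t m = s->lo;

        /* Without the cast the shift would happen in 32 bits and lose hi. */
        m |= (uint64_t)s->hi << 32;
        return m;
    }

    int main(void)
    {
        struct split_addr s = { .lo = 0xdeadbeef, .hi = 0x1 };

        printf("joined address: %#llx\n",
               (unsigned long long)join_addr(&s));   /* 0x1deadbeef */
        return 0;
    }
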
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 3bb2c4302df1..8cb3e438f21e 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -1237,11 +1237,12 @@ ENTRY(nmi)
1237 * If the variable is not set and the stack is not the NMI 1237 * If the variable is not set and the stack is not the NMI
1238 * stack then: 1238 * stack then:
1239 * o Set the special variable on the stack 1239 * o Set the special variable on the stack
1240 * o Copy the interrupt frame into a "saved" location on the stack 1240 * o Copy the interrupt frame into an "outermost" location on the
1241 * o Copy the interrupt frame into a "copy" location on the stack 1241 * stack
1242 * o Copy the interrupt frame into an "iret" location on the stack
1242 * o Continue processing the NMI 1243 * o Continue processing the NMI
1243 * If the variable is set or the previous stack is the NMI stack: 1244 * If the variable is set or the previous stack is the NMI stack:
1244 * o Modify the "copy" location to jump to the repeate_nmi 1245 * o Modify the "iret" location to jump to the repeat_nmi
1245 * o return back to the first NMI 1246 * o return back to the first NMI
1246 * 1247 *
1247 * Now on exit of the first NMI, we first clear the stack variable 1248 * Now on exit of the first NMI, we first clear the stack variable
@@ -1250,31 +1251,151 @@ ENTRY(nmi)
1250 * a nested NMI that updated the copy interrupt stack frame, a 1251 * a nested NMI that updated the copy interrupt stack frame, a
1251 * jump will be made to the repeat_nmi code that will handle the second 1252 * jump will be made to the repeat_nmi code that will handle the second
1252 * NMI. 1253 * NMI.
1254 *
1255 * However, espfix prevents us from directly returning to userspace
1256 * with a single IRET instruction. Similarly, IRET to user mode
1257 * can fault. We therefore handle NMIs from user space like
1258 * other IST entries.
1253 */ 1259 */
1254 1260
1255 /* Use %rdx as our temp variable throughout */ 1261 /* Use %rdx as our temp variable throughout */
1256 pushq %rdx 1262 pushq %rdx
1257 1263
1264 testb $3, CS-RIP+8(%rsp)
1265 jz .Lnmi_from_kernel
1266
1267 /*
1268 * NMI from user mode. We need to run on the thread stack, but we
1269 * can't go through the normal entry paths: NMIs are masked, and
1270 * we don't want to enable interrupts, because then we'll end
1271 * up in an awkward situation in which IRQs are on but NMIs
1272 * are off.
1273 */
1274
1275 SWAPGS
1276 cld
1277 movq %rsp, %rdx
1278 movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
1279 pushq 5*8(%rdx) /* pt_regs->ss */
1280 pushq 4*8(%rdx) /* pt_regs->rsp */
1281 pushq 3*8(%rdx) /* pt_regs->flags */
1282 pushq 2*8(%rdx) /* pt_regs->cs */
1283 pushq 1*8(%rdx) /* pt_regs->rip */
1284 pushq $-1 /* pt_regs->orig_ax */
1285 pushq %rdi /* pt_regs->di */
1286 pushq %rsi /* pt_regs->si */
1287 pushq (%rdx) /* pt_regs->dx */
1288 pushq %rcx /* pt_regs->cx */
1289 pushq %rax /* pt_regs->ax */
1290 pushq %r8 /* pt_regs->r8 */
1291 pushq %r9 /* pt_regs->r9 */
1292 pushq %r10 /* pt_regs->r10 */
1293 pushq %r11 /* pt_regs->r11 */
1294 pushq %rbx /* pt_regs->rbx */
1295 pushq %rbp /* pt_regs->rbp */
1296 pushq %r12 /* pt_regs->r12 */
1297 pushq %r13 /* pt_regs->r13 */
1298 pushq %r14 /* pt_regs->r14 */
1299 pushq %r15 /* pt_regs->r15 */
1300
1301 /*
1302 * At this point we no longer need to worry about stack damage
1303 * due to nesting -- we're on the normal thread stack and we're
1304 * done with the NMI stack.
1305 */
1306
1307 movq %rsp, %rdi
1308 movq $-1, %rsi
1309 call do_nmi
1310
1311 /*
1312 * Return back to user mode. We must *not* do the normal exit
1313 * work, because we don't want to enable interrupts. Fortunately,
1314 * do_nmi doesn't modify pt_regs.
1315 */
1316 SWAPGS
1317 jmp restore_c_regs_and_iret
1318
1319.Lnmi_from_kernel:
1320 /*
1321 * Here's what our stack frame will look like:
1322 * +---------------------------------------------------------+
1323 * | original SS |
1324 * | original Return RSP |
1325 * | original RFLAGS |
1326 * | original CS |
1327 * | original RIP |
1328 * +---------------------------------------------------------+
1329 * | temp storage for rdx |
1330 * +---------------------------------------------------------+
1331 * | "NMI executing" variable |
1332 * +---------------------------------------------------------+
1333 * | iret SS } Copied from "outermost" frame |
1334 * | iret Return RSP } on each loop iteration; overwritten |
1335 * | iret RFLAGS } by a nested NMI to force another |
1336 * | iret CS } iteration if needed. |
1337 * | iret RIP } |
1338 * +---------------------------------------------------------+
1339 * | outermost SS } initialized in first_nmi; |
1340 * | outermost Return RSP } will not be changed before |
1341 * | outermost RFLAGS } NMI processing is done. |
1342 * | outermost CS } Copied to "iret" frame on each |
1343 * | outermost RIP } iteration. |
1344 * +---------------------------------------------------------+
1345 * | pt_regs |
1346 * +---------------------------------------------------------+
1347 *
1348 * The "original" frame is used by hardware. Before re-enabling
1349 * NMIs, we need to be done with it, and we need to leave enough
1350 * space for the asm code here.
1351 *
1352 * We return by executing IRET while RSP points to the "iret" frame.
1353 * That will either return for real or it will loop back into NMI
1354 * processing.
1355 *
1356 * The "outermost" frame is copied to the "iret" frame on each
1357 * iteration of the loop, so each iteration starts with the "iret"
1358 * frame pointing to the final return target.
1359 */
1360
1258 /* 1361 /*
1259 * If %cs was not the kernel segment, then the NMI triggered in user 1362 * Determine whether we're a nested NMI.
1260 * space, which means it is definitely not nested. 1363 *
1364 * If we interrupted kernel code between repeat_nmi and
1365 * end_repeat_nmi, then we are a nested NMI. We must not
1366 * modify the "iret" frame because it's being written by
1367 * the outer NMI. That's okay; the outer NMI handler is
1368 * about to call do_nmi anyway, so we can just
1369 * resume the outer NMI.
1261 */ 1370 */
1262 cmpl $__KERNEL_CS, 16(%rsp) 1371
1263 jne first_nmi 1372 movq $repeat_nmi, %rdx
1373 cmpq 8(%rsp), %rdx
1374 ja 1f
1375 movq $end_repeat_nmi, %rdx
1376 cmpq 8(%rsp), %rdx
1377 ja nested_nmi_out
13781:
1264 1379
1265 /* 1380 /*
1266 * Check the special variable on the stack to see if NMIs are 1381 * Now check "NMI executing". If it's set, then we're nested.
1267 * executing. 1382 * This will not detect if we interrupted an outer NMI just
1383 * before IRET.
1268 */ 1384 */
1269 cmpl $1, -8(%rsp) 1385 cmpl $1, -8(%rsp)
1270 je nested_nmi 1386 je nested_nmi
1271 1387
1272 /* 1388 /*
1273 * Now test if the previous stack was an NMI stack. 1389 * Now test if the previous stack was an NMI stack. This covers
1274 * We need the double check. We check the NMI stack to satisfy the 1390 * the case where we interrupt an outer NMI after it clears
1275 * race when the first NMI clears the variable before returning. 1391 * "NMI executing" but before IRET. We need to be careful, though:
1276 * We check the variable because the first NMI could be in a 1392 * there is one case in which RSP could point to the NMI stack
1277 * breakpoint routine using a breakpoint stack. 1393 * despite there being no NMI active: naughty userspace controls
1394 * RSP at the very beginning of the SYSCALL targets. We can
1395 * pull a fast one on naughty userspace, though: we program
1396 * SYSCALL to mask DF, so userspace cannot cause DF to be set
1397 * if it controls the kernel's RSP. We set DF before we clear
1398 * "NMI executing".
1278 */ 1399 */
1279 lea 6*8(%rsp), %rdx 1400 lea 6*8(%rsp), %rdx
1280 /* Compare the NMI stack (rdx) with the stack we came from (4*8(%rsp)) */ 1401 /* Compare the NMI stack (rdx) with the stack we came from (4*8(%rsp)) */
@@ -1286,25 +1407,20 @@ ENTRY(nmi)
1286 cmpq %rdx, 4*8(%rsp) 1407 cmpq %rdx, 4*8(%rsp)
1287 /* If it is below the NMI stack, it is a normal NMI */ 1408 /* If it is below the NMI stack, it is a normal NMI */
1288 jb first_nmi 1409 jb first_nmi
1289 /* Ah, it is within the NMI stack, treat it as nested */ 1410
1411 /* Ah, it is within the NMI stack. */
1412
1413 testb $(X86_EFLAGS_DF >> 8), (3*8 + 1)(%rsp)
1414 jz first_nmi /* RSP was user controlled. */
1415
1416 /* This is a nested NMI. */
1290 1417
1291nested_nmi: 1418nested_nmi:
1292 /* 1419 /*
1293 * Do nothing if we interrupted the fixup in repeat_nmi. 1420 * Modify the "iret" frame to point to repeat_nmi, forcing another
1294 * It's about to repeat the NMI handler, so we are fine 1421 * iteration of NMI handling.
1295 * with ignoring this one.
1296 */ 1422 */
1297 movq $repeat_nmi, %rdx 1423 subq $8, %rsp
1298 cmpq 8(%rsp), %rdx
1299 ja 1f
1300 movq $end_repeat_nmi, %rdx
1301 cmpq 8(%rsp), %rdx
1302 ja nested_nmi_out
1303
13041:
1305 /* Set up the interrupted NMIs stack to jump to repeat_nmi */
1306 leaq -1*8(%rsp), %rdx
1307 movq %rdx, %rsp
1308 leaq -10*8(%rsp), %rdx 1424 leaq -10*8(%rsp), %rdx
1309 pushq $__KERNEL_DS 1425 pushq $__KERNEL_DS
1310 pushq %rdx 1426 pushq %rdx
@@ -1318,61 +1434,42 @@ nested_nmi:
1318nested_nmi_out: 1434nested_nmi_out:
1319 popq %rdx 1435 popq %rdx
1320 1436
1321 /* No need to check faults here */ 1437 /* We are returning to kernel mode, so this cannot result in a fault. */
1322 INTERRUPT_RETURN 1438 INTERRUPT_RETURN
1323 1439
1324first_nmi: 1440first_nmi:
1325 /* 1441 /* Restore rdx. */
1326 * Because nested NMIs will use the pushed location that we
1327 * stored in rdx, we must keep that space available.
1328 * Here's what our stack frame will look like:
1329 * +-------------------------+
1330 * | original SS |
1331 * | original Return RSP |
1332 * | original RFLAGS |
1333 * | original CS |
1334 * | original RIP |
1335 * +-------------------------+
1336 * | temp storage for rdx |
1337 * +-------------------------+
1338 * | NMI executing variable |
1339 * +-------------------------+
1340 * | copied SS |
1341 * | copied Return RSP |
1342 * | copied RFLAGS |
1343 * | copied CS |
1344 * | copied RIP |
1345 * +-------------------------+
1346 * | Saved SS |
1347 * | Saved Return RSP |
1348 * | Saved RFLAGS |
1349 * | Saved CS |
1350 * | Saved RIP |
1351 * +-------------------------+
1352 * | pt_regs |
1353 * +-------------------------+
1354 *
1355 * The saved stack frame is used to fix up the copied stack frame
1356 * that a nested NMI may change to make the interrupted NMI iret jump
1357 * to the repeat_nmi. The original stack frame and the temp storage
1358 * is also used by nested NMIs and can not be trusted on exit.
1359 */
1360 /* Do not pop rdx, nested NMIs will corrupt that part of the stack */
1361 movq (%rsp), %rdx 1442 movq (%rsp), %rdx
1362 1443
1363 /* Set the NMI executing variable on the stack. */ 1444 /* Make room for "NMI executing". */
1364 pushq $1 1445 pushq $0
1365 1446
1366 /* Leave room for the "copied" frame */ 1447 /* Leave room for the "iret" frame */
1367 subq $(5*8), %rsp 1448 subq $(5*8), %rsp
1368 1449
1369 /* Copy the stack frame to the Saved frame */ 1450 /* Copy the "original" frame to the "outermost" frame */
1370 .rept 5 1451 .rept 5
1371 pushq 11*8(%rsp) 1452 pushq 11*8(%rsp)
1372 .endr 1453 .endr
1373 1454
1374 /* Everything up to here is safe from nested NMIs */ 1455 /* Everything up to here is safe from nested NMIs */
1375 1456
1457#ifdef CONFIG_DEBUG_ENTRY
1458 /*
1459 * For ease of testing, unmask NMIs right away. Disabled by
1460 * default because IRET is very expensive.
1461 */
1462 pushq $0 /* SS */
1463 pushq %rsp /* RSP (minus 8 because of the previous push) */
1464 addq $8, (%rsp) /* Fix up RSP */
1465 pushfq /* RFLAGS */
1466 pushq $__KERNEL_CS /* CS */
1467 pushq $1f /* RIP */
1468 INTERRUPT_RETURN /* continues at repeat_nmi below */
14691:
1470#endif
1471
1472repeat_nmi:
1376 /* 1473 /*
1377 * If there was a nested NMI, the first NMI's iret will return 1474 * If there was a nested NMI, the first NMI's iret will return
1378 * here. But NMIs are still enabled and we can take another 1475 * here. But NMIs are still enabled and we can take another
@@ -1381,16 +1478,20 @@ first_nmi:
1381 * it will just return, as we are about to repeat an NMI anyway. 1478 * it will just return, as we are about to repeat an NMI anyway.
1382 * This makes it safe to copy to the stack frame that a nested 1479 * This makes it safe to copy to the stack frame that a nested
1383 * NMI will update. 1480 * NMI will update.
1481 *
1482 * RSP is pointing to "outermost RIP". gsbase is unknown, but, if
1483 * we're repeating an NMI, gsbase has the same value that it had on
1484 * the first iteration. paranoid_entry will load the kernel
1485 * gsbase if needed before we call do_nmi. "NMI executing"
1486 * is zero.
1384 */ 1487 */
1385repeat_nmi: 1488 movq $1, 10*8(%rsp) /* Set "NMI executing". */
1489
1386 /* 1490 /*
1387 * Update the stack variable to say we are still in NMI (the update 1491 * Copy the "outermost" frame to the "iret" frame. NMIs that nest
1388 * is benign for the non-repeat case, where 1 was pushed just above 1492 * here must not modify the "iret" frame while we're writing to
1389 * to this very stack slot). 1493 * it or it will end up containing garbage.
1390 */ 1494 */
1391 movq $1, 10*8(%rsp)
1392
1393 /* Make another copy, this one may be modified by nested NMIs */
1394 addq $(10*8), %rsp 1495 addq $(10*8), %rsp
1395 .rept 5 1496 .rept 5
1396 pushq -6*8(%rsp) 1497 pushq -6*8(%rsp)
@@ -1399,9 +1500,9 @@ repeat_nmi:
1399end_repeat_nmi: 1500end_repeat_nmi:
1400 1501
1401 /* 1502 /*
1402 * Everything below this point can be preempted by a nested 1503 * Everything below this point can be preempted by a nested NMI.
1403 * NMI if the first NMI took an exception and reset our iret stack 1504 * If this happens, then the inner NMI will change the "iret"
1404 * so that we repeat another NMI. 1505 * frame to point back to repeat_nmi.
1405 */ 1506 */
1406 pushq $-1 /* ORIG_RAX: no syscall to restart */ 1507 pushq $-1 /* ORIG_RAX: no syscall to restart */
1407 ALLOC_PT_GPREGS_ON_STACK 1508 ALLOC_PT_GPREGS_ON_STACK
@@ -1415,28 +1516,11 @@ end_repeat_nmi:
1415 */ 1516 */
1416 call paranoid_entry 1517 call paranoid_entry
1417 1518
1418 /*
1419 * Save off the CR2 register. If we take a page fault in the NMI then
1420 * it could corrupt the CR2 value. If the NMI preempts a page fault
1421 * handler before it was able to read the CR2 register, and then the
1422 * NMI itself takes a page fault, the page fault that was preempted
1423 * will read the information from the NMI page fault and not the
1424 * origin fault. Save it off and restore it if it changes.
1425 * Use the r12 callee-saved register.
1426 */
1427 movq %cr2, %r12
1428
1429 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */ 1519 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
1430 movq %rsp, %rdi 1520 movq %rsp, %rdi
1431 movq $-1, %rsi 1521 movq $-1, %rsi
1432 call do_nmi 1522 call do_nmi
1433 1523
1434 /* Did the NMI take a page fault? Restore cr2 if it did */
1435 movq %cr2, %rcx
1436 cmpq %rcx, %r12
1437 je 1f
1438 movq %r12, %cr2
1439 1:
1440 testl %ebx, %ebx /* swapgs needed? */ 1524 testl %ebx, %ebx /* swapgs needed? */
1441 jnz nmi_restore 1525 jnz nmi_restore
1442nmi_swapgs: 1526nmi_swapgs:
@@ -1444,11 +1528,26 @@ nmi_swapgs:
1444nmi_restore: 1528nmi_restore:
1445 RESTORE_EXTRA_REGS 1529 RESTORE_EXTRA_REGS
1446 RESTORE_C_REGS 1530 RESTORE_C_REGS
1447 /* Pop the extra iret frame at once */ 1531
1532 /* Point RSP at the "iret" frame. */
1448 REMOVE_PT_GPREGS_FROM_STACK 6*8 1533 REMOVE_PT_GPREGS_FROM_STACK 6*8
1449 1534
1450 /* Clear the NMI executing stack variable */ 1535 /*
1451 movq $0, 5*8(%rsp) 1536 * Clear "NMI executing". Set DF first so that we can easily
1537 * distinguish the remaining code between here and IRET from
1538 * the SYSCALL entry and exit paths. On a native kernel, we
1539 * could just inspect RIP, but, on paravirt kernels,
1540 * INTERRUPT_RETURN can translate into a jump into a
1541 * hypercall page.
1542 */
1543 std
1544 movq $0, 5*8(%rsp) /* clear "NMI executing" */
1545
1546 /*
1547 * INTERRUPT_RETURN reads the "iret" frame and exits the NMI
1548 * stack in a single instruction. We are returning to kernel
1549 * mode, so this cannot result in a fault.
1550 */
1452 INTERRUPT_RETURN 1551 INTERRUPT_RETURN
1453END(nmi) 1552END(nmi)
1454 1553
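Note (not part of the patch): a minimal C sketch of the extra stack area that first_nmi builds and that repeat_nmi/nmi_restore reference through the 5*8 and 10*8 displacements above. The struct and field names are invented for illustration; lower struct offsets correspond to lower stack addresses, and the saved %rdx slot plus the original hardware frame sit above this area.

#include <assert.h>
#include <stdint.h>

/* One hardware-style interrupt return frame, lowest address first. */
struct iret_frame {
        uint64_t rip;
        uint64_t cs;
        uint64_t rflags;
        uint64_t rsp;
        uint64_t ss;
};

/*
 * Illustrative layout, low to high addresses:
 *  - "outermost": pristine copy of the original frame; repeat_nmi re-copies
 *    it into the "iret" frame each time the handler is rerun,
 *  - "iret": the frame the final INTERRUPT_RETURN consumes; a nested NMI
 *    rewrites it so that it points back at repeat_nmi,
 *  - nmi_executing: the flag tested by the nested-NMI check, found at
 *    10*8(%rsp) in repeat_nmi and at 5*8(%rsp) at nmi_restore.
 */
struct nmi_extra_stack {
        struct iret_frame outermost;
        struct iret_frame iret;
        uint64_t nmi_executing;
};

static_assert(sizeof(struct iret_frame) == 5 * 8, "5*8 bytes per frame");
static_assert(sizeof(struct nmi_extra_stack) == 11 * 8, "two frames plus the flag");

int main(void) { return 0; }

Keeping the pristine "outermost" copy separate from the working "iret" copy is what lets a nested NMI scribble on the latter without losing the information needed to restart the handler.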
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index bb187a6a877c..a7e257d9cb90 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -140,6 +140,7 @@ sysexit_from_sys_call:
140 */ 140 */
141 andl $~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS) 141 andl $~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
142 movl RIP(%rsp), %ecx /* User %eip */ 142 movl RIP(%rsp), %ecx /* User %eip */
143 movq RAX(%rsp), %rax
143 RESTORE_RSI_RDI 144 RESTORE_RSI_RDI
144 xorl %edx, %edx /* Do not leak kernel information */ 145 xorl %edx, %edx /* Do not leak kernel information */
145 xorq %r8, %r8 146 xorq %r8, %r8
@@ -205,7 +206,6 @@ sysexit_from_sys_call:
205 movl RDX(%rsp), %edx /* arg3 */ 206 movl RDX(%rsp), %edx /* arg3 */
206 movl RSI(%rsp), %ecx /* arg4 */ 207 movl RSI(%rsp), %ecx /* arg4 */
207 movl RDI(%rsp), %r8d /* arg5 */ 208 movl RDI(%rsp), %r8d /* arg5 */
208 movl %ebp, %r9d /* arg6 */
209 .endm 209 .endm
210 210
211 .macro auditsys_exit exit 211 .macro auditsys_exit exit
@@ -220,7 +220,6 @@ sysexit_from_sys_call:
220 1: setbe %al /* 1 if error, 0 if not */ 220 1: setbe %al /* 1 if error, 0 if not */
221 movzbl %al, %edi /* zero-extend that into %edi */ 221 movzbl %al, %edi /* zero-extend that into %edi */
222 call __audit_syscall_exit 222 call __audit_syscall_exit
223 movq RAX(%rsp), %rax /* reload syscall return value */
224 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %edi 223 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %edi
225 DISABLE_INTERRUPTS(CLBR_NONE) 224 DISABLE_INTERRUPTS(CLBR_NONE)
226 TRACE_IRQS_OFF 225 TRACE_IRQS_OFF
@@ -236,6 +235,7 @@ sysexit_from_sys_call:
236 235
237sysenter_auditsys: 236sysenter_auditsys:
238 auditsys_entry_common 237 auditsys_entry_common
238 movl %ebp, %r9d /* reload 6th syscall arg */
239 jmp sysenter_dispatch 239 jmp sysenter_dispatch
240 240
241sysexit_audit: 241sysexit_audit:
@@ -336,7 +336,7 @@ ENTRY(entry_SYSCALL_compat)
336 * 32-bit zero extended: 336 * 32-bit zero extended:
337 */ 337 */
338 ASM_STAC 338 ASM_STAC
339 1: movl (%r8), %ebp 339 1: movl (%r8), %r9d
340 _ASM_EXTABLE(1b, ia32_badarg) 340 _ASM_EXTABLE(1b, ia32_badarg)
341 ASM_CLAC 341 ASM_CLAC
342 orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS) 342 orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
@@ -346,7 +346,7 @@ ENTRY(entry_SYSCALL_compat)
346cstar_do_call: 346cstar_do_call:
347 /* 32-bit syscall -> 64-bit C ABI argument conversion */ 347 /* 32-bit syscall -> 64-bit C ABI argument conversion */
348 movl %edi, %r8d /* arg5 */ 348 movl %edi, %r8d /* arg5 */
349 movl %ebp, %r9d /* arg6 */ 349 /* r9 already loaded */ /* arg6 */
350 xchg %ecx, %esi /* rsi:arg2, rcx:arg4 */ 350 xchg %ecx, %esi /* rsi:arg2, rcx:arg4 */
351 movl %ebx, %edi /* arg1 */ 351 movl %ebx, %edi /* arg1 */
352 movl %edx, %edx /* arg3 (zero extension) */ 352 movl %edx, %edx /* arg3 (zero extension) */
@@ -358,7 +358,6 @@ cstar_dispatch:
358 call *ia32_sys_call_table(, %rax, 8) 358 call *ia32_sys_call_table(, %rax, 8)
359 movq %rax, RAX(%rsp) 359 movq %rax, RAX(%rsp)
360 1: 360 1:
361 movl RCX(%rsp), %ebp
362 DISABLE_INTERRUPTS(CLBR_NONE) 361 DISABLE_INTERRUPTS(CLBR_NONE)
363 TRACE_IRQS_OFF 362 TRACE_IRQS_OFF
364 testl $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS) 363 testl $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
@@ -369,6 +368,7 @@ sysretl_from_sys_call:
369 RESTORE_RSI_RDI_RDX 368 RESTORE_RSI_RDI_RDX
370 movl RIP(%rsp), %ecx 369 movl RIP(%rsp), %ecx
371 movl EFLAGS(%rsp), %r11d 370 movl EFLAGS(%rsp), %r11d
371 movq RAX(%rsp), %rax
372 xorq %r10, %r10 372 xorq %r10, %r10
373 xorq %r9, %r9 373 xorq %r9, %r9
374 xorq %r8, %r8 374 xorq %r8, %r8
@@ -392,7 +392,9 @@ sysretl_from_sys_call:
392 392
393#ifdef CONFIG_AUDITSYSCALL 393#ifdef CONFIG_AUDITSYSCALL
394cstar_auditsys: 394cstar_auditsys:
395 movl %r9d, R9(%rsp) /* register to be clobbered by call */
395 auditsys_entry_common 396 auditsys_entry_common
397 movl R9(%rsp), %r9d /* reload 6th syscall arg */
396 jmp cstar_dispatch 398 jmp cstar_dispatch
397 399
398sysretl_audit: 400sysretl_audit:
@@ -404,14 +406,16 @@ cstar_tracesys:
404 testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS) 406 testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
405 jz cstar_auditsys 407 jz cstar_auditsys
406#endif 408#endif
409 xchgl %r9d, %ebp
407 SAVE_EXTRA_REGS 410 SAVE_EXTRA_REGS
408 xorl %eax, %eax /* Do not leak kernel information */ 411 xorl %eax, %eax /* Do not leak kernel information */
409 movq %rax, R11(%rsp) 412 movq %rax, R11(%rsp)
410 movq %rax, R10(%rsp) 413 movq %rax, R10(%rsp)
411 movq %rax, R9(%rsp) 414 movq %r9, R9(%rsp)
412 movq %rax, R8(%rsp) 415 movq %rax, R8(%rsp)
413 movq %rsp, %rdi /* &pt_regs -> arg1 */ 416 movq %rsp, %rdi /* &pt_regs -> arg1 */
414 call syscall_trace_enter 417 call syscall_trace_enter
418 movl R9(%rsp), %r9d
415 419
416 /* Reload arg registers from stack. (see sysenter_tracesys) */ 420 /* Reload arg registers from stack. (see sysenter_tracesys) */
417 movl RCX(%rsp), %ecx 421 movl RCX(%rsp), %ecx
@@ -421,6 +425,7 @@ cstar_tracesys:
421 movl %eax, %eax /* zero extension */ 425 movl %eax, %eax /* zero extension */
422 426
423 RESTORE_EXTRA_REGS 427 RESTORE_EXTRA_REGS
428 xchgl %ebp, %r9d
424 jmp cstar_do_call 429 jmp cstar_do_call
425END(entry_SYSCALL_compat) 430END(entry_SYSCALL_compat)
426 431
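Note (not part of the patch): the compat hunks above stop parking the sixth syscall argument in %ebp and carry it in %r9d instead, reloading it around auditsys_entry_common and syscall_trace_enter, which clobber r9. For orientation, a hedged C sketch of the register pairing cstar_do_call performs; the struct and function names are invented, only the mapping is taken from the code above.

#include <stdint.h>

/* Invented snapshot types, for illustration only. */
struct compat_regs {
        uint32_t ebx, ecx, edx, esi, edi, sixth_arg;  /* arg1 .. arg6 */
};

struct sysv64_args {
        uint64_t rdi, rsi, rdx, rcx, r8, r9;
};

/* 32-bit syscall registers -> 64-bit C calling convention, zero-extended. */
static struct sysv64_args map_compat_args(const struct compat_regs *r)
{
        return (struct sysv64_args){
                .rdi = r->ebx,        /* arg1 */
                .rsi = r->ecx,        /* arg2 (the xchg above) */
                .rdx = r->edx,        /* arg3 */
                .rcx = r->esi,        /* arg4 (the xchg above) */
                .r8  = r->edi,        /* arg5 */
                .r9  = r->sixth_arg,  /* arg6, loaded from user memory at (%r8) */
        };
}

int main(void)
{
        struct compat_regs r = { 1, 2, 3, 4, 5, 6 };
        struct sysv64_args a = map_compat_args(&r);
        return a.r9 == 6 ? 0 : 1;
}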
diff --git a/arch/x86/include/asm/Kbuild b/arch/x86/include/asm/Kbuild
index 4dd1f2d770af..aeac434c9feb 100644
--- a/arch/x86/include/asm/Kbuild
+++ b/arch/x86/include/asm/Kbuild
@@ -9,3 +9,4 @@ generic-y += cputime.h
9generic-y += dma-contiguous.h 9generic-y += dma-contiguous.h
10generic-y += early_ioremap.h 10generic-y += early_ioremap.h
11generic-y += mcs_spinlock.h 11generic-y += mcs_spinlock.h
12generic-y += mm-arch-hooks.h
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index a0bf89fd2647..4e10d73cf018 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -280,21 +280,6 @@ static inline void clear_LDT(void)
280 set_ldt(NULL, 0); 280 set_ldt(NULL, 0);
281} 281}
282 282
283/*
284 * load one particular LDT into the current CPU
285 */
286static inline void load_LDT_nolock(mm_context_t *pc)
287{
288 set_ldt(pc->ldt, pc->size);
289}
290
291static inline void load_LDT(mm_context_t *pc)
292{
293 preempt_disable();
294 load_LDT_nolock(pc);
295 preempt_enable();
296}
297
298static inline unsigned long get_desc_base(const struct desc_struct *desc) 283static inline unsigned long get_desc_base(const struct desc_struct *desc)
299{ 284{
300 return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24)); 285 return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
diff --git a/arch/x86/include/asm/fpu/types.h b/arch/x86/include/asm/fpu/types.h
index 0637826292de..c49c5173158e 100644
--- a/arch/x86/include/asm/fpu/types.h
+++ b/arch/x86/include/asm/fpu/types.h
@@ -189,6 +189,7 @@ union fpregs_state {
189 struct fxregs_state fxsave; 189 struct fxregs_state fxsave;
190 struct swregs_state soft; 190 struct swregs_state soft;
191 struct xregs_state xsave; 191 struct xregs_state xsave;
192 u8 __padding[PAGE_SIZE];
192}; 193};
193 194
194/* 195/*
@@ -198,40 +199,6 @@ union fpregs_state {
198 */ 199 */
199struct fpu { 200struct fpu {
200 /* 201 /*
201 * @state:
202 *
203 * In-memory copy of all FPU registers that we save/restore
204 * over context switches. If the task is using the FPU then
205 * the registers in the FPU are more recent than this state
206 * copy. If the task context-switches away then they get
207 * saved here and represent the FPU state.
208 *
209 * After context switches there may be a (short) time period
210 * during which the in-FPU hardware registers are unchanged
211 * and still perfectly match this state, if the tasks
212 * scheduled afterwards are not using the FPU.
213 *
214 * This is the 'lazy restore' window of optimization, which
215 * we track though 'fpu_fpregs_owner_ctx' and 'fpu->last_cpu'.
216 *
217 * We detect whether a subsequent task uses the FPU via setting
218 * CR0::TS to 1, which causes any FPU use to raise a #NM fault.
219 *
220 * During this window, if the task gets scheduled again, we
221 * might be able to skip having to do a restore from this
222 * memory buffer to the hardware registers - at the cost of
223 * incurring the overhead of #NM fault traps.
224 *
225 * Note that on modern CPUs that support the XSAVEOPT (or other
226 * optimized XSAVE instructions), we don't use #NM traps anymore,
227 * as the hardware can track whether FPU registers need saving
228 * or not. On such CPUs we activate the non-lazy ('eagerfpu')
229 * logic, which unconditionally saves/restores all FPU state
230 * across context switches. (if FPU state exists.)
231 */
232 union fpregs_state state;
233
234 /*
235 * @last_cpu: 202 * @last_cpu:
236 * 203 *
237 * Records the last CPU on which this context was loaded into 204 * Records the last CPU on which this context was loaded into
@@ -288,6 +255,43 @@ struct fpu {
288 * deal with bursty apps that only use the FPU for a short time: 255 * deal with bursty apps that only use the FPU for a short time:
289 */ 256 */
290 unsigned char counter; 257 unsigned char counter;
258 /*
259 * @state:
260 *
261 * In-memory copy of all FPU registers that we save/restore
262 * over context switches. If the task is using the FPU then
263 * the registers in the FPU are more recent than this state
264 * copy. If the task context-switches away then they get
265 * saved here and represent the FPU state.
266 *
267 * After context switches there may be a (short) time period
268 * during which the in-FPU hardware registers are unchanged
269 * and still perfectly match this state, if the tasks
270 * scheduled afterwards are not using the FPU.
271 *
272 * This is the 'lazy restore' window of optimization, which
273 * we track though 'fpu_fpregs_owner_ctx' and 'fpu->last_cpu'.
274 *
275 * We detect whether a subsequent task uses the FPU via setting
276 * CR0::TS to 1, which causes any FPU use to raise a #NM fault.
277 *
278 * During this window, if the task gets scheduled again, we
279 * might be able to skip having to do a restore from this
280 * memory buffer to the hardware registers - at the cost of
281 * incurring the overhead of #NM fault traps.
282 *
283 * Note that on modern CPUs that support the XSAVEOPT (or other
284 * optimized XSAVE instructions), we don't use #NM traps anymore,
285 * as the hardware can track whether FPU registers need saving
286 * or not. On such CPUs we activate the non-lazy ('eagerfpu')
287 * logic, which unconditionally saves/restores all FPU state
288 * across context switches. (if FPU state exists.)
289 */
290 union fpregs_state state;
291 /*
292 * WARNING: 'state' is dynamically-sized. Do not put
293 * anything after it here.
294 */
291}; 295};
292 296
293#endif /* _ASM_X86_FPU_H */ 297#endif /* _ASM_X86_FPU_H */
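Note (not part of the patch): the fpu/types.h hunk pads union fpregs_state to PAGE_SIZE and warns that 'state' is dynamically sized and must stay last in struct fpu (and, further down, that struct fpu must stay last in thread_struct). A minimal userspace sketch of that pattern, an oversized static type whose real size is only known at runtime, using invented names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-ins for the compile-time-padded register state and its container. */
struct regs_state { unsigned char bytes[4096]; };

struct ctx {
        int last_cpu;
        /* Must stay last: only the first runtime_state_size bytes exist. */
        struct regs_state state;
};

static size_t runtime_state_size;  /* discovered at startup, e.g. via CPUID */

static struct ctx *alloc_ctx(void)
{
        /* Subtract the padded static size, add back the real size. */
        size_t sz = sizeof(struct ctx) - sizeof(struct regs_state)
                    + runtime_state_size;
        return calloc(1, sz);
}

int main(void)
{
        runtime_state_size = 832;  /* pretend the hardware reported this */
        struct ctx *c = alloc_ctx();
        if (!c)
                return 1;
        memset(&c->state, 0, runtime_state_size);  /* never past the real size */
        printf("allocated %zu bytes\n",
               sizeof(struct ctx) - sizeof(struct regs_state) + runtime_state_size);
        free(c);
        return 0;
}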
diff --git a/arch/x86/include/asm/intel_pmc_ipc.h b/arch/x86/include/asm/intel_pmc_ipc.h
index 200ec2e7821d..cd0310e186f4 100644
--- a/arch/x86/include/asm/intel_pmc_ipc.h
+++ b/arch/x86/include/asm/intel_pmc_ipc.h
@@ -25,36 +25,9 @@
25 25
26#if IS_ENABLED(CONFIG_INTEL_PMC_IPC) 26#if IS_ENABLED(CONFIG_INTEL_PMC_IPC)
27 27
28/*
29 * intel_pmc_ipc_simple_command
30 * @cmd: command
31 * @sub: sub type
32 */
33int intel_pmc_ipc_simple_command(int cmd, int sub); 28int intel_pmc_ipc_simple_command(int cmd, int sub);
34
35/*
36 * intel_pmc_ipc_raw_cmd
37 * @cmd: command
38 * @sub: sub type
39 * @in: input data
40 * @inlen: input length in bytes
41 * @out: output data
42 * @outlen: output length in dwords
43 * @sptr: data writing to SPTR register
44 * @dptr: data writing to DPTR register
45 */
46int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen, 29int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen,
47 u32 *out, u32 outlen, u32 dptr, u32 sptr); 30 u32 *out, u32 outlen, u32 dptr, u32 sptr);
48
49/*
50 * intel_pmc_ipc_command
51 * @cmd: command
52 * @sub: sub type
53 * @in: input data
54 * @inlen: input length in bytes
55 * @out: output data
56 * @outlen: output length in dwords
57 */
58int intel_pmc_ipc_command(u32 cmd, u32 sub, u8 *in, u32 inlen, 31int intel_pmc_ipc_command(u32 cmd, u32 sub, u8 *in, u32 inlen,
59 u32 *out, u32 outlen); 32 u32 *out, u32 outlen);
60 33
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 2a7f5d782c33..49ec9038ec14 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -604,6 +604,8 @@ struct kvm_arch {
604 bool iommu_noncoherent; 604 bool iommu_noncoherent;
605#define __KVM_HAVE_ARCH_NONCOHERENT_DMA 605#define __KVM_HAVE_ARCH_NONCOHERENT_DMA
606 atomic_t noncoherent_dma_count; 606 atomic_t noncoherent_dma_count;
607#define __KVM_HAVE_ARCH_ASSIGNED_DEVICE
608 atomic_t assigned_device_count;
607 struct kvm_pic *vpic; 609 struct kvm_pic *vpic;
608 struct kvm_ioapic *vioapic; 610 struct kvm_ioapic *vioapic;
609 struct kvm_pit *vpit; 611 struct kvm_pit *vpit;
diff --git a/arch/x86/include/asm/mm-arch-hooks.h b/arch/x86/include/asm/mm-arch-hooks.h
deleted file mode 100644
index 4e881a342236..000000000000
--- a/arch/x86/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * Architecture specific mm hooks
3 *
4 * Copyright (C) 2015, IBM Corporation
5 * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _ASM_X86_MM_ARCH_HOOKS_H
13#define _ASM_X86_MM_ARCH_HOOKS_H
14
15#endif /* _ASM_X86_MM_ARCH_HOOKS_H */
diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
index 09b9620a73b4..364d27481a52 100644
--- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h
@@ -9,8 +9,7 @@
9 * we put the segment information here. 9 * we put the segment information here.
10 */ 10 */
11typedef struct { 11typedef struct {
12 void *ldt; 12 struct ldt_struct *ldt;
13 int size;
14 13
15#ifdef CONFIG_X86_64 14#ifdef CONFIG_X86_64
16 /* True if mm supports a task running in 32 bit compatibility mode. */ 15 /* True if mm supports a task running in 32 bit compatibility mode. */
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 5e8daee7c5c9..984abfe47edc 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -23,7 +23,7 @@ extern struct static_key rdpmc_always_available;
23 23
24static inline void load_mm_cr4(struct mm_struct *mm) 24static inline void load_mm_cr4(struct mm_struct *mm)
25{ 25{
26 if (static_key_true(&rdpmc_always_available) || 26 if (static_key_false(&rdpmc_always_available) ||
27 atomic_read(&mm->context.perf_rdpmc_allowed)) 27 atomic_read(&mm->context.perf_rdpmc_allowed))
28 cr4_set_bits(X86_CR4_PCE); 28 cr4_set_bits(X86_CR4_PCE);
29 else 29 else
@@ -34,6 +34,50 @@ static inline void load_mm_cr4(struct mm_struct *mm) {}
34#endif 34#endif
35 35
36/* 36/*
37 * ldt_structs can be allocated, used, and freed, but they are never
38 * modified while live.
39 */
40struct ldt_struct {
41 /*
42 * Xen requires page-aligned LDTs with special permissions. This is
43 * needed to prevent us from installing evil descriptors such as
44 * call gates. On native, we could merge the ldt_struct and LDT
45 * allocations, but it's not worth trying to optimize.
46 */
47 struct desc_struct *entries;
48 int size;
49};
50
51static inline void load_mm_ldt(struct mm_struct *mm)
52{
53 struct ldt_struct *ldt;
54
55 /* lockless_dereference synchronizes with smp_store_release */
56 ldt = lockless_dereference(mm->context.ldt);
57
58 /*
59 * Any change to mm->context.ldt is followed by an IPI to all
60 * CPUs with the mm active. The LDT will not be freed until
61 * after the IPI is handled by all such CPUs. This means that,
62 * if the ldt_struct changes before we return, the values we see
63 * will be safe, and the new values will be loaded before we run
64 * any user code.
65 *
66 * NB: don't try to convert this to use RCU without extreme care.
67 * We would still need IRQs off, because we don't want to change
68 * the local LDT after an IPI loaded a newer value than the one
69 * that we can see.
70 */
71
72 if (unlikely(ldt))
73 set_ldt(ldt->entries, ldt->size);
74 else
75 clear_LDT();
76
77 DEBUG_LOCKS_WARN_ON(preemptible());
78}
79
80/*
37 * Used for LDT copy/destruction. 81 * Used for LDT copy/destruction.
38 */ 82 */
39int init_new_context(struct task_struct *tsk, struct mm_struct *mm); 83int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
@@ -78,12 +122,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
78 * was called and then modify_ldt changed 122 * was called and then modify_ldt changed
79 * prev->context.ldt but suppressed an IPI to this CPU. 123 * prev->context.ldt but suppressed an IPI to this CPU.
80 * In this case, prev->context.ldt != NULL, because we 124 * In this case, prev->context.ldt != NULL, because we
81 * never free an LDT while the mm still exists. That 125 * never set context.ldt to NULL while the mm still
82 * means that next->context.ldt != prev->context.ldt, 126 * exists. That means that next->context.ldt !=
83 * because mms never share an LDT. 127 * prev->context.ldt, because mms never share an LDT.
84 */ 128 */
85 if (unlikely(prev->context.ldt != next->context.ldt)) 129 if (unlikely(prev->context.ldt != next->context.ldt))
86 load_LDT_nolock(&next->context); 130 load_mm_ldt(next);
87 } 131 }
88#ifdef CONFIG_SMP 132#ifdef CONFIG_SMP
89 else { 133 else {
@@ -106,7 +150,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
106 load_cr3(next->pgd); 150 load_cr3(next->pgd);
107 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL); 151 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
108 load_mm_cr4(next); 152 load_mm_cr4(next);
109 load_LDT_nolock(&next->context); 153 load_mm_ldt(next);
110 } 154 }
111 } 155 }
112#endif 156#endif
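Note (not part of the patch): load_mm_ldt() above pairs lockless_dereference() on the reader side with smp_store_release() on the writer side (see install_ldt later in this patch). The writer publishes a fully initialized, immutable ldt_struct, so readers see either NULL or a complete object. A rough userspace analogue with C11 atomics, where an acquire load stands in for lockless_dereference() (which only needs data-dependency ordering) and the names are invented for the sketch:

#include <stdatomic.h>
#include <stdlib.h>

struct snapshot {
        int size;
        int entries[16];
};

static _Atomic(struct snapshot *) current_snapshot;

/* Writer: build the object completely, then publish with release order. */
static void publish(struct snapshot *next)
{
        atomic_store_explicit(&current_snapshot, next, memory_order_release);
}

/* Reader: the load sees either NULL or a finished object, never a partial one. */
static const struct snapshot *lookup(void)
{
        return atomic_load_explicit(&current_snapshot, memory_order_acquire);
}

int main(void)
{
        struct snapshot *s = calloc(1, sizeof(*s));
        if (!s)
                return 1;
        s->size = 3;  /* fully initialize ... */
        publish(s);   /* ... then make it visible */

        const struct snapshot *r = lookup();
        int ok = (r != NULL && r->size == 3);

        /*
         * Unlike the kernel, this sketch does not show when the previous
         * snapshot may be freed; the patch relies on an IPI to every CPU
         * using the mm before free_ldt_struct() runs.
         */
        free(s);
        return ok ? 0 : 1;
}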
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 43e6519df0d5..944f1785ed0d 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -390,9 +390,6 @@ struct thread_struct {
390#endif 390#endif
391 unsigned long gs; 391 unsigned long gs;
392 392
393 /* Floating point and extended processor state */
394 struct fpu fpu;
395
396 /* Save middle states of ptrace breakpoints */ 393 /* Save middle states of ptrace breakpoints */
397 struct perf_event *ptrace_bps[HBP_NUM]; 394 struct perf_event *ptrace_bps[HBP_NUM];
398 /* Debug status used for traps, single steps, etc... */ 395 /* Debug status used for traps, single steps, etc... */
@@ -418,6 +415,13 @@ struct thread_struct {
418 unsigned long iopl; 415 unsigned long iopl;
419 /* Max allowed port in the bitmap, in bytes: */ 416 /* Max allowed port in the bitmap, in bytes: */
420 unsigned io_bitmap_max; 417 unsigned io_bitmap_max;
418
419 /* Floating point and extended processor state */
420 struct fpu fpu;
421 /*
422 * WARNING: 'fpu' is dynamically-sized. It *MUST* be at
423 * the end.
424 */
421}; 425};
422 426
423/* 427/*
diff --git a/arch/x86/include/asm/sigcontext.h b/arch/x86/include/asm/sigcontext.h
index 6fe6b182c998..9dfce4e0417d 100644
--- a/arch/x86/include/asm/sigcontext.h
+++ b/arch/x86/include/asm/sigcontext.h
@@ -57,9 +57,9 @@ struct sigcontext {
57 unsigned long ip; 57 unsigned long ip;
58 unsigned long flags; 58 unsigned long flags;
59 unsigned short cs; 59 unsigned short cs;
60 unsigned short __pad2; /* Was called gs, but was always zero. */ 60 unsigned short gs;
61 unsigned short __pad1; /* Was called fs, but was always zero. */ 61 unsigned short fs;
62 unsigned short ss; 62 unsigned short __pad0;
63 unsigned long err; 63 unsigned long err;
64 unsigned long trapno; 64 unsigned long trapno;
65 unsigned long oldmask; 65 unsigned long oldmask;
diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
index 751bf4b7bf11..d7f3b3b78ac3 100644
--- a/arch/x86/include/asm/switch_to.h
+++ b/arch/x86/include/asm/switch_to.h
@@ -79,12 +79,12 @@ do { \
79#else /* CONFIG_X86_32 */ 79#else /* CONFIG_X86_32 */
80 80
81/* frame pointer must be last for get_wchan */ 81/* frame pointer must be last for get_wchan */
82#define SAVE_CONTEXT "pushq %%rbp ; movq %%rsi,%%rbp\n\t" 82#define SAVE_CONTEXT "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
83#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp\t" 83#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"
84 84
85#define __EXTRA_CLOBBER \ 85#define __EXTRA_CLOBBER \
86 , "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \ 86 , "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
87 "r12", "r13", "r14", "r15", "flags" 87 "r12", "r13", "r14", "r15"
88 88
89#ifdef CONFIG_CC_STACKPROTECTOR 89#ifdef CONFIG_CC_STACKPROTECTOR
90#define __switch_canary \ 90#define __switch_canary \
@@ -100,11 +100,7 @@ do { \
100#define __switch_canary_iparam 100#define __switch_canary_iparam
101#endif /* CC_STACKPROTECTOR */ 101#endif /* CC_STACKPROTECTOR */
102 102
103/* 103/* Save restore flags to clear handle leaking NT */
104 * There is no need to save or restore flags, because flags are always
105 * clean in kernel mode, with the possible exception of IOPL. Kernel IOPL
106 * has no effect.
107 */
108#define switch_to(prev, next, last) \ 104#define switch_to(prev, next, last) \
109 asm volatile(SAVE_CONTEXT \ 105 asm volatile(SAVE_CONTEXT \
110 "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \ 106 "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \
diff --git a/arch/x86/include/uapi/asm/hyperv.h b/arch/x86/include/uapi/asm/hyperv.h
index 8fba544e9cc4..f36d56bd7632 100644
--- a/arch/x86/include/uapi/asm/hyperv.h
+++ b/arch/x86/include/uapi/asm/hyperv.h
@@ -108,6 +108,8 @@
108#define HV_X64_HYPERCALL_PARAMS_XMM_AVAILABLE (1 << 4) 108#define HV_X64_HYPERCALL_PARAMS_XMM_AVAILABLE (1 << 4)
109/* Support for a virtual guest idle state is available */ 109/* Support for a virtual guest idle state is available */
110#define HV_X64_GUEST_IDLE_STATE_AVAILABLE (1 << 5) 110#define HV_X64_GUEST_IDLE_STATE_AVAILABLE (1 << 5)
111/* Guest crash data handler available */
112#define HV_X64_GUEST_CRASH_MSR_AVAILABLE (1 << 10)
111 113
112/* 114/*
113 * Implementation recommendations. Indicates which behaviors the hypervisor 115 * Implementation recommendations. Indicates which behaviors the hypervisor
diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
index a4ae82eb82aa..cd54147cb365 100644
--- a/arch/x86/include/uapi/asm/kvm.h
+++ b/arch/x86/include/uapi/asm/kvm.h
@@ -354,7 +354,7 @@ struct kvm_xcrs {
354struct kvm_sync_regs { 354struct kvm_sync_regs {
355}; 355};
356 356
357#define KVM_QUIRK_LINT0_REENABLED (1 << 0) 357#define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0)
358#define KVM_QUIRK_CD_NW_CLEARED (1 << 1) 358#define KVM_X86_QUIRK_CD_NW_CLEARED (1 << 1)
359 359
360#endif /* _ASM_X86_KVM_H */ 360#endif /* _ASM_X86_KVM_H */
diff --git a/arch/x86/include/uapi/asm/sigcontext.h b/arch/x86/include/uapi/asm/sigcontext.h
index 0e8a973de9ee..40836a9a7250 100644
--- a/arch/x86/include/uapi/asm/sigcontext.h
+++ b/arch/x86/include/uapi/asm/sigcontext.h
@@ -177,24 +177,9 @@ struct sigcontext {
177 __u64 rip; 177 __u64 rip;
178 __u64 eflags; /* RFLAGS */ 178 __u64 eflags; /* RFLAGS */
179 __u16 cs; 179 __u16 cs;
180 180 __u16 gs;
181 /* 181 __u16 fs;
182 * Prior to 2.5.64 ("[PATCH] x86-64 updates for 2.5.64-bk3"), 182 __u16 __pad0;
183 * Linux saved and restored fs and gs in these slots. This
184 * was counterproductive, as fsbase and gsbase were never
185 * saved, so arch_prctl was presumably unreliable.
186 *
187 * If these slots are ever needed for any other purpose, there
188 * is some risk that very old 64-bit binaries could get
189 * confused. I doubt that many such binaries still work,
190 * though, since the same patch in 2.5.64 also removed the
191 * 64-bit set_thread_area syscall, so it appears that there is
192 * no TLS API that works in both pre- and post-2.5.64 kernels.
193 */
194 __u16 __pad2; /* Was gs. */
195 __u16 __pad1; /* Was fs. */
196
197 __u16 ss;
198 __u64 err; 183 __u64 err;
199 __u64 trapno; 184 __u64 trapno;
200 __u64 oldmask; 185 __u64 oldmask;
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 845dc0df2002..206052e55517 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -943,7 +943,7 @@ static bool mp_check_pin_attr(int irq, struct irq_alloc_info *info)
943 */ 943 */
944 if (irq < nr_legacy_irqs() && data->count == 1) { 944 if (irq < nr_legacy_irqs() && data->count == 1) {
945 if (info->ioapic_trigger != data->trigger) 945 if (info->ioapic_trigger != data->trigger)
946 mp_register_handler(irq, data->trigger); 946 mp_register_handler(irq, info->ioapic_trigger);
947 data->entry.trigger = data->trigger = info->ioapic_trigger; 947 data->entry.trigger = data->trigger = info->ioapic_trigger;
948 data->entry.polarity = data->polarity = info->ioapic_polarity; 948 data->entry.polarity = data->polarity = info->ioapic_polarity;
949 } 949 }
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index f813261d9740..2683f36e4e0a 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -322,7 +322,7 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
322 irq_data->chip = &lapic_controller; 322 irq_data->chip = &lapic_controller;
323 irq_data->chip_data = data; 323 irq_data->chip_data = data;
324 irq_data->hwirq = virq + i; 324 irq_data->hwirq = virq + i;
325 err = assign_irq_vector_policy(virq, irq_data->node, data, 325 err = assign_irq_vector_policy(virq + i, irq_data->node, data,
326 info); 326 info);
327 if (err) 327 if (err)
328 goto error; 328 goto error;
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 922c5e0cea4c..cb9e5df42dd2 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1410,7 +1410,7 @@ void cpu_init(void)
1410 load_sp0(t, &current->thread); 1410 load_sp0(t, &current->thread);
1411 set_tss_desc(cpu, t); 1411 set_tss_desc(cpu, t);
1412 load_TR_desc(); 1412 load_TR_desc();
1413 load_LDT(&init_mm.context); 1413 load_mm_ldt(&init_mm);
1414 1414
1415 clear_all_debug_regs(); 1415 clear_all_debug_regs();
1416 dbg_restore_debug_regs(); 1416 dbg_restore_debug_regs();
@@ -1459,7 +1459,7 @@ void cpu_init(void)
1459 load_sp0(t, thread); 1459 load_sp0(t, thread);
1460 set_tss_desc(cpu, t); 1460 set_tss_desc(cpu, t);
1461 load_TR_desc(); 1461 load_TR_desc();
1462 load_LDT(&init_mm.context); 1462 load_mm_ldt(&init_mm);
1463 1463
1464 t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap); 1464 t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
1465 1465
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 3658de47900f..9469dfa55607 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -2179,21 +2179,25 @@ static unsigned long get_segment_base(unsigned int segment)
2179 int idx = segment >> 3; 2179 int idx = segment >> 3;
2180 2180
2181 if ((segment & SEGMENT_TI_MASK) == SEGMENT_LDT) { 2181 if ((segment & SEGMENT_TI_MASK) == SEGMENT_LDT) {
2182 struct ldt_struct *ldt;
2183
2182 if (idx > LDT_ENTRIES) 2184 if (idx > LDT_ENTRIES)
2183 return 0; 2185 return 0;
2184 2186
2185 if (idx > current->active_mm->context.size) 2187 /* IRQs are off, so this synchronizes with smp_store_release */
2188 ldt = lockless_dereference(current->active_mm->context.ldt);
2189 if (!ldt || idx > ldt->size)
2186 return 0; 2190 return 0;
2187 2191
2188 desc = current->active_mm->context.ldt; 2192 desc = &ldt->entries[idx];
2189 } else { 2193 } else {
2190 if (idx > GDT_ENTRIES) 2194 if (idx > GDT_ENTRIES)
2191 return 0; 2195 return 0;
2192 2196
2193 desc = raw_cpu_ptr(gdt_page.gdt); 2197 desc = raw_cpu_ptr(gdt_page.gdt) + idx;
2194 } 2198 }
2195 2199
2196 return get_desc_base(desc + idx); 2200 return get_desc_base(desc);
2197} 2201}
2198 2202
2199#ifdef CONFIG_COMPAT 2203#ifdef CONFIG_COMPAT
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index b9826a981fb2..6326ae24e4d5 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -2534,7 +2534,7 @@ static int intel_pmu_cpu_prepare(int cpu)
2534 if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) { 2534 if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
2535 cpuc->shared_regs = allocate_shared_regs(cpu); 2535 cpuc->shared_regs = allocate_shared_regs(cpu);
2536 if (!cpuc->shared_regs) 2536 if (!cpuc->shared_regs)
2537 return NOTIFY_BAD; 2537 goto err;
2538 } 2538 }
2539 2539
2540 if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) { 2540 if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
@@ -2542,18 +2542,27 @@ static int intel_pmu_cpu_prepare(int cpu)
2542 2542
2543 cpuc->constraint_list = kzalloc(sz, GFP_KERNEL); 2543 cpuc->constraint_list = kzalloc(sz, GFP_KERNEL);
2544 if (!cpuc->constraint_list) 2544 if (!cpuc->constraint_list)
2545 return NOTIFY_BAD; 2545 goto err_shared_regs;
2546 2546
2547 cpuc->excl_cntrs = allocate_excl_cntrs(cpu); 2547 cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
2548 if (!cpuc->excl_cntrs) { 2548 if (!cpuc->excl_cntrs)
2549 kfree(cpuc->constraint_list); 2549 goto err_constraint_list;
2550 kfree(cpuc->shared_regs); 2550
2551 return NOTIFY_BAD;
2552 }
2553 cpuc->excl_thread_id = 0; 2551 cpuc->excl_thread_id = 0;
2554 } 2552 }
2555 2553
2556 return NOTIFY_OK; 2554 return NOTIFY_OK;
2555
2556err_constraint_list:
2557 kfree(cpuc->constraint_list);
2558 cpuc->constraint_list = NULL;
2559
2560err_shared_regs:
2561 kfree(cpuc->shared_regs);
2562 cpuc->shared_regs = NULL;
2563
2564err:
2565 return NOTIFY_BAD;
2557} 2566}
2558 2567
2559static void intel_pmu_cpu_starting(int cpu) 2568static void intel_pmu_cpu_starting(int cpu)
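Note (not part of the patch): the intel_pmu_cpu_prepare() change above replaces ad-hoc kfree()-and-return error handling with the usual unwind-in-reverse-order goto labels, and also NULLs the freed pointers so a later teardown cannot double-free. A small self-contained illustration of the same shape, with invented names:

#include <stdlib.h>

struct cpu_ctx {
        void *shared_regs;
        void *constraint_list;
        void *excl_cntrs;
};

static int prepare(struct cpu_ctx *c)
{
        c->shared_regs = malloc(64);
        if (!c->shared_regs)
                goto err;

        c->constraint_list = calloc(1, 128);
        if (!c->constraint_list)
                goto err_shared_regs;

        c->excl_cntrs = malloc(32);
        if (!c->excl_cntrs)
                goto err_constraint_list;

        return 0;

        /* Unwind in reverse allocation order; clear pointers as we go. */
err_constraint_list:
        free(c->constraint_list);
        c->constraint_list = NULL;
err_shared_regs:
        free(c->shared_regs);
        c->shared_regs = NULL;
err:
        return -1;
}

int main(void)
{
        struct cpu_ctx c = { 0 };
        int ret = prepare(&c);

        free(c.excl_cntrs);
        free(c.constraint_list);
        free(c.shared_regs);
        return ret ? 1 : 0;
}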
diff --git a/arch/x86/kernel/cpu/perf_event_intel_cqm.c b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
index 188076161c1b..377e8f8ed391 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_cqm.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
@@ -952,6 +952,14 @@ static u64 intel_cqm_event_count(struct perf_event *event)
952 return 0; 952 return 0;
953 953
954 /* 954 /*
955 * Getting up-to-date values requires an SMP IPI which is not
956 * possible if we're being called in interrupt context. Return
957 * the cached values instead.
958 */
959 if (unlikely(in_interrupt()))
960 goto out;
961
962 /*
955 * Notice that we don't perform the reading of an RMID 963 * Notice that we don't perform the reading of an RMID
956 * atomically, because we can't hold a spin lock across the 964 * atomically, because we can't hold a spin lock across the
957 * IPIs. 965 * IPIs.
@@ -1247,7 +1255,7 @@ static inline void cqm_pick_event_reader(int cpu)
1247 cpumask_set_cpu(cpu, &cqm_cpumask); 1255 cpumask_set_cpu(cpu, &cqm_cpumask);
1248} 1256}
1249 1257
1250static void intel_cqm_cpu_prepare(unsigned int cpu) 1258static void intel_cqm_cpu_starting(unsigned int cpu)
1251{ 1259{
1252 struct intel_pqr_state *state = &per_cpu(pqr_state, cpu); 1260 struct intel_pqr_state *state = &per_cpu(pqr_state, cpu);
1253 struct cpuinfo_x86 *c = &cpu_data(cpu); 1261 struct cpuinfo_x86 *c = &cpu_data(cpu);
@@ -1288,13 +1296,11 @@ static int intel_cqm_cpu_notifier(struct notifier_block *nb,
1288 unsigned int cpu = (unsigned long)hcpu; 1296 unsigned int cpu = (unsigned long)hcpu;
1289 1297
1290 switch (action & ~CPU_TASKS_FROZEN) { 1298 switch (action & ~CPU_TASKS_FROZEN) {
1291 case CPU_UP_PREPARE:
1292 intel_cqm_cpu_prepare(cpu);
1293 break;
1294 case CPU_DOWN_PREPARE: 1299 case CPU_DOWN_PREPARE:
1295 intel_cqm_cpu_exit(cpu); 1300 intel_cqm_cpu_exit(cpu);
1296 break; 1301 break;
1297 case CPU_STARTING: 1302 case CPU_STARTING:
1303 intel_cqm_cpu_starting(cpu);
1298 cqm_pick_event_reader(cpu); 1304 cqm_pick_event_reader(cpu);
1299 break; 1305 break;
1300 } 1306 }
@@ -1365,7 +1371,7 @@ static int __init intel_cqm_init(void)
1365 goto out; 1371 goto out;
1366 1372
1367 for_each_online_cpu(i) { 1373 for_each_online_cpu(i) {
1368 intel_cqm_cpu_prepare(i); 1374 intel_cqm_cpu_starting(i);
1369 cqm_pick_event_reader(i); 1375 cqm_pick_event_reader(i);
1370 } 1376 }
1371 1377
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 79de954626fd..d25097c3fc1d 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -270,7 +270,7 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
270 dst_fpu->fpregs_active = 0; 270 dst_fpu->fpregs_active = 0;
271 dst_fpu->last_cpu = -1; 271 dst_fpu->last_cpu = -1;
272 272
273 if (src_fpu->fpstate_active) 273 if (src_fpu->fpstate_active && cpu_has_fpu)
274 fpu_copy(dst_fpu, src_fpu); 274 fpu_copy(dst_fpu, src_fpu);
275 275
276 return 0; 276 return 0;
diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
index 32826791e675..d14e9ac3235a 100644
--- a/arch/x86/kernel/fpu/init.c
+++ b/arch/x86/kernel/fpu/init.c
@@ -4,6 +4,8 @@
4#include <asm/fpu/internal.h> 4#include <asm/fpu/internal.h>
5#include <asm/tlbflush.h> 5#include <asm/tlbflush.h>
6 6
7#include <linux/sched.h>
8
7/* 9/*
8 * Initialize the TS bit in CR0 according to the style of context-switches 10 * Initialize the TS bit in CR0 according to the style of context-switches
9 * we are using: 11 * we are using:
@@ -38,7 +40,12 @@ static void fpu__init_cpu_generic(void)
38 write_cr0(cr0); 40 write_cr0(cr0);
39 41
40 /* Flush out any pending x87 state: */ 42 /* Flush out any pending x87 state: */
41 asm volatile ("fninit"); 43#ifdef CONFIG_MATH_EMULATION
44 if (!cpu_has_fpu)
45 fpstate_init_soft(&current->thread.fpu.state.soft);
46 else
47#endif
48 asm volatile ("fninit");
42} 49}
43 50
44/* 51/*
@@ -136,6 +143,43 @@ static void __init fpu__init_system_generic(void)
136unsigned int xstate_size; 143unsigned int xstate_size;
137EXPORT_SYMBOL_GPL(xstate_size); 144EXPORT_SYMBOL_GPL(xstate_size);
138 145
146/* Enforce that 'MEMBER' is the last field of 'TYPE': */
147#define CHECK_MEMBER_AT_END_OF(TYPE, MEMBER) \
148 BUILD_BUG_ON(sizeof(TYPE) != offsetofend(TYPE, MEMBER))
149
150/*
151 * We append the 'struct fpu' to the task_struct:
152 */
153static void __init fpu__init_task_struct_size(void)
154{
155 int task_size = sizeof(struct task_struct);
156
157 /*
158 * Subtract off the static size of the register state.
159 * It potentially has a bunch of padding.
160 */
161 task_size -= sizeof(((struct task_struct *)0)->thread.fpu.state);
162
163 /*
164 * Add back the dynamically-calculated register state
165 * size.
166 */
167 task_size += xstate_size;
168
169 /*
170 * We dynamically size 'struct fpu', so we require that
171 * it be at the end of 'thread_struct' and that
172 * 'thread_struct' be at the end of 'task_struct'. If
173 * you hit a compile error here, check the structure to
174 * see if something got added to the end.
175 */
176 CHECK_MEMBER_AT_END_OF(struct fpu, state);
177 CHECK_MEMBER_AT_END_OF(struct thread_struct, fpu);
178 CHECK_MEMBER_AT_END_OF(struct task_struct, thread);
179
180 arch_task_struct_size = task_size;
181}
182
139/* 183/*
140 * Set up the xstate_size based on the legacy FPU context size. 184 * Set up the xstate_size based on the legacy FPU context size.
141 * 185 *
@@ -287,6 +331,7 @@ void __init fpu__init_system(struct cpuinfo_x86 *c)
287 fpu__init_system_generic(); 331 fpu__init_system_generic();
288 fpu__init_system_xstate_size_legacy(); 332 fpu__init_system_xstate_size_legacy();
289 fpu__init_system_xstate(); 333 fpu__init_system_xstate();
334 fpu__init_task_struct_size();
290 335
291 fpu__init_system_ctx_switch(); 336 fpu__init_system_ctx_switch();
292} 337}
@@ -311,9 +356,15 @@ static int __init x86_noxsave_setup(char *s)
311 356
312 setup_clear_cpu_cap(X86_FEATURE_XSAVE); 357 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
313 setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT); 358 setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
359 setup_clear_cpu_cap(X86_FEATURE_XSAVEC);
314 setup_clear_cpu_cap(X86_FEATURE_XSAVES); 360 setup_clear_cpu_cap(X86_FEATURE_XSAVES);
315 setup_clear_cpu_cap(X86_FEATURE_AVX); 361 setup_clear_cpu_cap(X86_FEATURE_AVX);
316 setup_clear_cpu_cap(X86_FEATURE_AVX2); 362 setup_clear_cpu_cap(X86_FEATURE_AVX2);
363 setup_clear_cpu_cap(X86_FEATURE_AVX512F);
364 setup_clear_cpu_cap(X86_FEATURE_AVX512PF);
365 setup_clear_cpu_cap(X86_FEATURE_AVX512ER);
366 setup_clear_cpu_cap(X86_FEATURE_AVX512CD);
367 setup_clear_cpu_cap(X86_FEATURE_MPX);
317 368
318 return 1; 369 return 1;
319} 370}
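Note (not part of the patch): fpu__init_task_struct_size() above enforces the "dynamically sized member must come last" rule with CHECK_MEMBER_AT_END_OF, i.e. BUILD_BUG_ON(sizeof(TYPE) != offsetofend(TYPE, MEMBER)). The same check can be written in plain C11 with static_assert; offsetofend() is simply offsetof() plus the member's size. A sketch with made-up structures:

#include <assert.h>
#include <stddef.h>

#define offsetofend(TYPE, MEMBER) \
        (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

/* Compile-time proof that MEMBER is the last field of TYPE.
 * Trailing padding after MEMBER would also trip the check. */
#define CHECK_MEMBER_AT_END_OF(TYPE, MEMBER) \
        static_assert(sizeof(TYPE) == offsetofend(TYPE, MEMBER), \
                      #MEMBER " must be the last member of " #TYPE)

struct inner { long a; char tail[8]; };
struct outer { long x; struct inner last; };

CHECK_MEMBER_AT_END_OF(struct inner, tail);
CHECK_MEMBER_AT_END_OF(struct outer, last);

int main(void) { return 0; }

Chaining the checks through fpu, thread_struct, and task_struct is what makes the later sizeof-subtraction plus xstate_size arithmetic safe.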
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index c37886d759cc..2bcc0525f1c1 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -12,6 +12,7 @@
12#include <linux/string.h> 12#include <linux/string.h>
13#include <linux/mm.h> 13#include <linux/mm.h>
14#include <linux/smp.h> 14#include <linux/smp.h>
15#include <linux/slab.h>
15#include <linux/vmalloc.h> 16#include <linux/vmalloc.h>
16#include <linux/uaccess.h> 17#include <linux/uaccess.h>
17 18
@@ -20,82 +21,82 @@
20#include <asm/mmu_context.h> 21#include <asm/mmu_context.h>
21#include <asm/syscalls.h> 22#include <asm/syscalls.h>
22 23
23#ifdef CONFIG_SMP 24/* context.lock is held for us, so we don't need any locking. */
24static void flush_ldt(void *current_mm) 25static void flush_ldt(void *current_mm)
25{ 26{
26 if (current->active_mm == current_mm) 27 mm_context_t *pc;
27 load_LDT(&current->active_mm->context); 28
29 if (current->active_mm != current_mm)
30 return;
31
32 pc = &current->active_mm->context;
33 set_ldt(pc->ldt->entries, pc->ldt->size);
28} 34}
29#endif
30 35
31static int alloc_ldt(mm_context_t *pc, int mincount, int reload) 36/* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */
37static struct ldt_struct *alloc_ldt_struct(int size)
32{ 38{
33 void *oldldt, *newldt; 39 struct ldt_struct *new_ldt;
34 int oldsize; 40 int alloc_size;
35 41
36 if (mincount <= pc->size) 42 if (size > LDT_ENTRIES)
37 return 0; 43 return NULL;
38 oldsize = pc->size; 44
39 mincount = (mincount + (PAGE_SIZE / LDT_ENTRY_SIZE - 1)) & 45 new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL);
40 (~(PAGE_SIZE / LDT_ENTRY_SIZE - 1)); 46 if (!new_ldt)
41 if (mincount * LDT_ENTRY_SIZE > PAGE_SIZE) 47 return NULL;
42 newldt = vmalloc(mincount * LDT_ENTRY_SIZE); 48
49 BUILD_BUG_ON(LDT_ENTRY_SIZE != sizeof(struct desc_struct));
50 alloc_size = size * LDT_ENTRY_SIZE;
51
52 /*
53 * Xen is very picky: it requires a page-aligned LDT that has no
54 * trailing nonzero bytes in any page that contains LDT descriptors.
55 * Keep it simple: zero the whole allocation and never allocate less
56 * than PAGE_SIZE.
57 */
58 if (alloc_size > PAGE_SIZE)
59 new_ldt->entries = vzalloc(alloc_size);
43 else 60 else
44 newldt = (void *)__get_free_page(GFP_KERNEL); 61 new_ldt->entries = kzalloc(PAGE_SIZE, GFP_KERNEL);
45
46 if (!newldt)
47 return -ENOMEM;
48 62
49 if (oldsize) 63 if (!new_ldt->entries) {
50 memcpy(newldt, pc->ldt, oldsize * LDT_ENTRY_SIZE); 64 kfree(new_ldt);
51 oldldt = pc->ldt; 65 return NULL;
52 memset(newldt + oldsize * LDT_ENTRY_SIZE, 0, 66 }
53 (mincount - oldsize) * LDT_ENTRY_SIZE);
54 67
55 paravirt_alloc_ldt(newldt, mincount); 68 new_ldt->size = size;
69 return new_ldt;
70}
56 71
57#ifdef CONFIG_X86_64 72/* After calling this, the LDT is immutable. */
58 /* CHECKME: Do we really need this ? */ 73static void finalize_ldt_struct(struct ldt_struct *ldt)
59 wmb(); 74{
60#endif 75 paravirt_alloc_ldt(ldt->entries, ldt->size);
61 pc->ldt = newldt;
62 wmb();
63 pc->size = mincount;
64 wmb();
65
66 if (reload) {
67#ifdef CONFIG_SMP
68 preempt_disable();
69 load_LDT(pc);
70 if (!cpumask_equal(mm_cpumask(current->mm),
71 cpumask_of(smp_processor_id())))
72 smp_call_function(flush_ldt, current->mm, 1);
73 preempt_enable();
74#else
75 load_LDT(pc);
76#endif
77 }
78 if (oldsize) {
79 paravirt_free_ldt(oldldt, oldsize);
80 if (oldsize * LDT_ENTRY_SIZE > PAGE_SIZE)
81 vfree(oldldt);
82 else
83 put_page(virt_to_page(oldldt));
84 }
85 return 0;
86} 76}
87 77
88static inline int copy_ldt(mm_context_t *new, mm_context_t *old) 78/* context.lock is held */
79static void install_ldt(struct mm_struct *current_mm,
80 struct ldt_struct *ldt)
89{ 81{
90 int err = alloc_ldt(new, old->size, 0); 82 /* Synchronizes with lockless_dereference in load_mm_ldt. */
91 int i; 83 smp_store_release(&current_mm->context.ldt, ldt);
84
85 /* Activate the LDT for all CPUs using current_mm. */
86 on_each_cpu_mask(mm_cpumask(current_mm), flush_ldt, current_mm, true);
87}
92 88
93 if (err < 0) 89static void free_ldt_struct(struct ldt_struct *ldt)
94 return err; 90{
91 if (likely(!ldt))
92 return;
95 93
96 for (i = 0; i < old->size; i++) 94 paravirt_free_ldt(ldt->entries, ldt->size);
97 write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE); 95 if (ldt->size * LDT_ENTRY_SIZE > PAGE_SIZE)
98 return 0; 96 vfree(ldt->entries);
97 else
98 kfree(ldt->entries);
99 kfree(ldt);
99} 100}
100 101
101/* 102/*
@@ -104,17 +105,37 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
104 */ 105 */
105int init_new_context(struct task_struct *tsk, struct mm_struct *mm) 106int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
106{ 107{
108 struct ldt_struct *new_ldt;
107 struct mm_struct *old_mm; 109 struct mm_struct *old_mm;
108 int retval = 0; 110 int retval = 0;
109 111
110 mutex_init(&mm->context.lock); 112 mutex_init(&mm->context.lock);
111 mm->context.size = 0;
112 old_mm = current->mm; 113 old_mm = current->mm;
113 if (old_mm && old_mm->context.size > 0) { 114 if (!old_mm) {
114 mutex_lock(&old_mm->context.lock); 115 mm->context.ldt = NULL;
115 retval = copy_ldt(&mm->context, &old_mm->context); 116 return 0;
116 mutex_unlock(&old_mm->context.lock);
117 } 117 }
118
119 mutex_lock(&old_mm->context.lock);
120 if (!old_mm->context.ldt) {
121 mm->context.ldt = NULL;
122 goto out_unlock;
123 }
124
125 new_ldt = alloc_ldt_struct(old_mm->context.ldt->size);
126 if (!new_ldt) {
127 retval = -ENOMEM;
128 goto out_unlock;
129 }
130
131 memcpy(new_ldt->entries, old_mm->context.ldt->entries,
132 new_ldt->size * LDT_ENTRY_SIZE);
133 finalize_ldt_struct(new_ldt);
134
135 mm->context.ldt = new_ldt;
136
137out_unlock:
138 mutex_unlock(&old_mm->context.lock);
118 return retval; 139 return retval;
119} 140}
120 141
@@ -125,53 +146,47 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
125 */ 146 */
126void destroy_context(struct mm_struct *mm) 147void destroy_context(struct mm_struct *mm)
127{ 148{
128 if (mm->context.size) { 149 free_ldt_struct(mm->context.ldt);
129#ifdef CONFIG_X86_32 150 mm->context.ldt = NULL;
130 /* CHECKME: Can this ever happen ? */
131 if (mm == current->active_mm)
132 clear_LDT();
133#endif
134 paravirt_free_ldt(mm->context.ldt, mm->context.size);
135 if (mm->context.size * LDT_ENTRY_SIZE > PAGE_SIZE)
136 vfree(mm->context.ldt);
137 else
138 put_page(virt_to_page(mm->context.ldt));
139 mm->context.size = 0;
140 }
141} 151}
142 152
143static int read_ldt(void __user *ptr, unsigned long bytecount) 153static int read_ldt(void __user *ptr, unsigned long bytecount)
144{ 154{
145 int err; 155 int retval;
146 unsigned long size; 156 unsigned long size;
147 struct mm_struct *mm = current->mm; 157 struct mm_struct *mm = current->mm;
148 158
149 if (!mm->context.size) 159 mutex_lock(&mm->context.lock);
150 return 0; 160
161 if (!mm->context.ldt) {
162 retval = 0;
163 goto out_unlock;
164 }
165
151 if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES) 166 if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES)
152 bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES; 167 bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES;
153 168
154 mutex_lock(&mm->context.lock); 169 size = mm->context.ldt->size * LDT_ENTRY_SIZE;
155 size = mm->context.size * LDT_ENTRY_SIZE;
156 if (size > bytecount) 170 if (size > bytecount)
157 size = bytecount; 171 size = bytecount;
158 172
159 err = 0; 173 if (copy_to_user(ptr, mm->context.ldt->entries, size)) {
160 if (copy_to_user(ptr, mm->context.ldt, size)) 174 retval = -EFAULT;
161 err = -EFAULT; 175 goto out_unlock;
162 mutex_unlock(&mm->context.lock); 176 }
163 if (err < 0) 177
164 goto error_return;
165 if (size != bytecount) { 178 if (size != bytecount) {
166 /* zero-fill the rest */ 179 /* Zero-fill the rest and pretend we read bytecount bytes. */
167 if (clear_user(ptr + size, bytecount - size) != 0) { 180 if (clear_user(ptr + size, bytecount - size)) {
168 err = -EFAULT; 181 retval = -EFAULT;
169 goto error_return; 182 goto out_unlock;
170 } 183 }
171 } 184 }
172 return bytecount; 185 retval = bytecount;
173error_return: 186
174 return err; 187out_unlock:
188 mutex_unlock(&mm->context.lock);
189 return retval;
175} 190}
176 191
177static int read_default_ldt(void __user *ptr, unsigned long bytecount) 192static int read_default_ldt(void __user *ptr, unsigned long bytecount)
@@ -195,6 +210,8 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
195 struct desc_struct ldt; 210 struct desc_struct ldt;
196 int error; 211 int error;
197 struct user_desc ldt_info; 212 struct user_desc ldt_info;
213 int oldsize, newsize;
214 struct ldt_struct *new_ldt, *old_ldt;
198 215
199 error = -EINVAL; 216 error = -EINVAL;
200 if (bytecount != sizeof(ldt_info)) 217 if (bytecount != sizeof(ldt_info))
@@ -213,34 +230,39 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
213 goto out; 230 goto out;
214 } 231 }
215 232
216 mutex_lock(&mm->context.lock); 233 if ((oldmode && !ldt_info.base_addr && !ldt_info.limit) ||
217 if (ldt_info.entry_number >= mm->context.size) { 234 LDT_empty(&ldt_info)) {
218 error = alloc_ldt(&current->mm->context, 235 /* The user wants to clear the entry. */
219 ldt_info.entry_number + 1, 1); 236 memset(&ldt, 0, sizeof(ldt));
220 if (error < 0) 237 } else {
221 goto out_unlock; 238 if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
222 } 239 error = -EINVAL;
223 240 goto out;
224 /* Allow LDTs to be cleared by the user. */
225 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
226 if (oldmode || LDT_empty(&ldt_info)) {
227 memset(&ldt, 0, sizeof(ldt));
228 goto install;
229 } 241 }
242
243 fill_ldt(&ldt, &ldt_info);
244 if (oldmode)
245 ldt.avl = 0;
230 } 246 }
231 247
232 if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) { 248 mutex_lock(&mm->context.lock);
233 error = -EINVAL; 249
250 old_ldt = mm->context.ldt;
251 oldsize = old_ldt ? old_ldt->size : 0;
252 newsize = max((int)(ldt_info.entry_number + 1), oldsize);
253
254 error = -ENOMEM;
255 new_ldt = alloc_ldt_struct(newsize);
256 if (!new_ldt)
234 goto out_unlock; 257 goto out_unlock;
235 }
236 258
237 fill_ldt(&ldt, &ldt_info); 259 if (old_ldt)
238 if (oldmode) 260 memcpy(new_ldt->entries, old_ldt->entries, oldsize * LDT_ENTRY_SIZE);
239 ldt.avl = 0; 261 new_ldt->entries[ldt_info.entry_number] = ldt;
262 finalize_ldt_struct(new_ldt);
240 263
241 /* Install the new entry ... */ 264 install_ldt(mm, new_ldt);
242install: 265 free_ldt_struct(old_ldt);
243 write_ldt_entry(mm->context.ldt, ldt_info.entry_number, &ldt);
244 error = 0; 266 error = 0;
245 267
246out_unlock: 268out_unlock:
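Note (not part of the patch): the rewritten write_ldt() above never modifies a live LDT. It allocates a new ldt_struct sized for the larger of the old table and the requested entry, copies the old entries, patches the one slot, publishes the new table with install_ldt() (the smp_store_release() side of the pattern shown earlier), and frees the old one only after on_each_cpu_mask() has made every CPU using the mm reload. A generic userspace sketch of the copy-modify-publish-reclaim shape, with invented names:

#include <stdlib.h>
#include <string.h>

struct table {
        int size;
        long *entries;
};

/* Copy-on-write update: build a new immutable table for the caller to swap in. */
static struct table *table_update(const struct table *old, int idx, long val)
{
        int newsize = idx + 1;
        if (old && old->size > newsize)
                newsize = old->size;

        struct table *new = malloc(sizeof(*new));
        if (!new)
                return NULL;
        new->entries = calloc(newsize, sizeof(*new->entries));
        if (!new->entries) {
                free(new);
                return NULL;
        }
        if (old)
                memcpy(new->entries, old->entries,
                       old->size * sizeof(*old->entries));
        new->entries[idx] = val;
        new->size = newsize;

        /*
         * The caller publishes 'new' (install_ldt in the patch) and frees
         * 'old' only once no reader can still be using it; the kernel
         * guarantees that with an IPI to every CPU running the mm.
         */
        return new;
}

int main(void)
{
        struct table *t = table_update(NULL, 2, 0x42);
        struct table *t2 = t ? table_update(t, 5, 0x99) : NULL;

        if (t) { free(t->entries); free(t); }
        if (t2) { free(t2->entries); free(t2); }
        return 0;
}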
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index c3e985d1751c..d05bd2e2ee91 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -408,15 +408,15 @@ static void default_do_nmi(struct pt_regs *regs)
408NOKPROBE_SYMBOL(default_do_nmi); 408NOKPROBE_SYMBOL(default_do_nmi);
409 409
410/* 410/*
411 * NMIs can hit breakpoints which will cause it to lose its 411 * NMIs can page fault or hit breakpoints which will cause it to lose
412 * NMI context with the CPU when the breakpoint does an iret. 412 * its NMI context with the CPU when the breakpoint or page fault does an IRET.
413 */ 413 *
414#ifdef CONFIG_X86_32 414 * As a result, NMIs can nest if NMIs get unmasked due an IRET during
415/* 415 * NMI processing. On x86_64, the asm glue protects us from nested NMIs
416 * For i386, NMIs use the same stack as the kernel, and we can 416 * if the outer NMI came from kernel mode, but we can still nest if the
417 * add a workaround to the iret problem in C (preventing nested 417 * outer NMI came from user mode.
418 * NMIs if an NMI takes a trap). Simply have 3 states the NMI 418 *
419 * can be in: 419 * To handle these nested NMIs, we have three states:
420 * 420 *
421 * 1) not running 421 * 1) not running
422 * 2) executing 422 * 2) executing
@@ -430,15 +430,14 @@ NOKPROBE_SYMBOL(default_do_nmi);
430 * (Note, the latch is binary, thus multiple NMIs triggering, 430 * (Note, the latch is binary, thus multiple NMIs triggering,
431 * when one is running, are ignored. Only one NMI is restarted.) 431 * when one is running, are ignored. Only one NMI is restarted.)
432 * 432 *
433 * If an NMI hits a breakpoint that executes an iret, another 433 * If an NMI executes an iret, another NMI can preempt it. We do not
434 * NMI can preempt it. We do not want to allow this new NMI 434 * want to allow this new NMI to run, but we want to execute it when the
435 * to run, but we want to execute it when the first one finishes. 435 * first one finishes. We set the state to "latched", and the exit of
436 * We set the state to "latched", and the exit of the first NMI will 436 * the first NMI will perform a dec_return, if the result is zero
437 * perform a dec_return, if the result is zero (NOT_RUNNING), then 437 * (NOT_RUNNING), then it will simply exit the NMI handler. If not, the
438 * it will simply exit the NMI handler. If not, the dec_return 438 * dec_return would have set the state to NMI_EXECUTING (what we want it
439 * would have set the state to NMI_EXECUTING (what we want it to 439 * to be when we are running). In this case, we simply jump back to
440 * be when we are running). In this case, we simply jump back 440 * rerun the NMI handler again, and restart the 'latched' NMI.
441 * to rerun the NMI handler again, and restart the 'latched' NMI.
442 * 441 *
443 * No trap (breakpoint or page fault) should be hit before nmi_restart, 442 * No trap (breakpoint or page fault) should be hit before nmi_restart,
444 * thus there is no race between the first check of state for NOT_RUNNING 443 * thus there is no race between the first check of state for NOT_RUNNING
@@ -461,49 +460,36 @@ enum nmi_states {
461static DEFINE_PER_CPU(enum nmi_states, nmi_state); 460static DEFINE_PER_CPU(enum nmi_states, nmi_state);
462static DEFINE_PER_CPU(unsigned long, nmi_cr2); 461static DEFINE_PER_CPU(unsigned long, nmi_cr2);
463 462
464#define nmi_nesting_preprocess(regs) \ 463#ifdef CONFIG_X86_64
465 do { \
466 if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) { \
467 this_cpu_write(nmi_state, NMI_LATCHED); \
468 return; \
469 } \
470 this_cpu_write(nmi_state, NMI_EXECUTING); \
471 this_cpu_write(nmi_cr2, read_cr2()); \
472 } while (0); \
473 nmi_restart:
474
475#define nmi_nesting_postprocess() \
476 do { \
477 if (unlikely(this_cpu_read(nmi_cr2) != read_cr2())) \
478 write_cr2(this_cpu_read(nmi_cr2)); \
479 if (this_cpu_dec_return(nmi_state)) \
480 goto nmi_restart; \
481 } while (0)
482#else /* x86_64 */
483/* 464/*
484 * In x86_64 things are a bit more difficult. This has the same problem 465 * In x86_64, we need to handle breakpoint -> NMI -> breakpoint. Without
485 * where an NMI hitting a breakpoint that calls iret will remove the 466 * some care, the inner breakpoint will clobber the outer breakpoint's
486 * NMI context, allowing a nested NMI to enter. What makes this more 467 * stack.
487 * difficult is that both NMIs and breakpoints have their own stack.
488 * When a new NMI or breakpoint is executed, the stack is set to a fixed
489 * point. If an NMI is nested, it will have its stack set at that same
490 * fixed address that the first NMI had, and will start corrupting the
491 * stack. This is handled in entry_64.S, but the same problem exists with
492 * the breakpoint stack.
493 * 468 *
494 * If a breakpoint is being processed, and the debug stack is being used, 469 * If a breakpoint is being processed, and the debug stack is being
495 * if an NMI comes in and also hits a breakpoint, the stack pointer 470 * used, if an NMI comes in and also hits a breakpoint, the stack
496 * will be set to the same fixed address as the breakpoint that was 471 * pointer will be set to the same fixed address as the breakpoint that
497 * interrupted, causing that stack to be corrupted. To handle this case, 472 * was interrupted, causing that stack to be corrupted. To handle this
498 * check if the stack that was interrupted is the debug stack, and if 473 * case, check if the stack that was interrupted is the debug stack, and
499 * so, change the IDT so that new breakpoints will use the current stack 474 * if so, change the IDT so that new breakpoints will use the current
500 * and not switch to the fixed address. On return of the NMI, switch back 475 * stack and not switch to the fixed address. On return of the NMI,
501 * to the original IDT. 476 * switch back to the original IDT.
502 */ 477 */
503static DEFINE_PER_CPU(int, update_debug_stack); 478static DEFINE_PER_CPU(int, update_debug_stack);
479#endif
504 480
505static inline void nmi_nesting_preprocess(struct pt_regs *regs) 481dotraplinkage notrace void
482do_nmi(struct pt_regs *regs, long error_code)
506{ 483{
484 if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) {
485 this_cpu_write(nmi_state, NMI_LATCHED);
486 return;
487 }
488 this_cpu_write(nmi_state, NMI_EXECUTING);
489 this_cpu_write(nmi_cr2, read_cr2());
490nmi_restart:
491
492#ifdef CONFIG_X86_64
507 /* 493 /*
508 * If we interrupted a breakpoint, it is possible that 494 * If we interrupted a breakpoint, it is possible that
509 * the nmi handler will have breakpoints too. We need to 495 * the nmi handler will have breakpoints too. We need to
@@ -514,22 +500,8 @@ static inline void nmi_nesting_preprocess(struct pt_regs *regs)
514 debug_stack_set_zero(); 500 debug_stack_set_zero();
515 this_cpu_write(update_debug_stack, 1); 501 this_cpu_write(update_debug_stack, 1);
516 } 502 }
517}
518
519static inline void nmi_nesting_postprocess(void)
520{
521 if (unlikely(this_cpu_read(update_debug_stack))) {
522 debug_stack_reset();
523 this_cpu_write(update_debug_stack, 0);
524 }
525}
526#endif 503#endif
527 504
528dotraplinkage notrace void
529do_nmi(struct pt_regs *regs, long error_code)
530{
531 nmi_nesting_preprocess(regs);
532
533 nmi_enter(); 505 nmi_enter();
534 506
535 inc_irq_stat(__nmi_count); 507 inc_irq_stat(__nmi_count);
@@ -539,8 +511,17 @@ do_nmi(struct pt_regs *regs, long error_code)
539 511
540 nmi_exit(); 512 nmi_exit();
541 513
542 /* On i386, may loop back to preprocess */ 514#ifdef CONFIG_X86_64
543 nmi_nesting_postprocess(); 515 if (unlikely(this_cpu_read(update_debug_stack))) {
516 debug_stack_reset();
517 this_cpu_write(update_debug_stack, 0);
518 }
519#endif
520
521 if (unlikely(this_cpu_read(nmi_cr2) != read_cr2()))
522 write_cr2(this_cpu_read(nmi_cr2));
523 if (this_cpu_dec_return(nmi_state))
524 goto nmi_restart;
544} 525}
545NOKPROBE_SYMBOL(do_nmi); 526NOKPROBE_SYMBOL(do_nmi);
546 527
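Illustrative note (not part of the patch): the rewritten do_nmi() above folds the old nmi_nesting_preprocess()/nmi_nesting_postprocess() helpers into a single flow and handles nested NMIs with a per-CPU three-state latch plus a goto nmi_restart loop. The stand-alone C sketch below models only that latch logic; the function names and the simulated nested NMI are invented for the example and are not kernel code.

#include <stdio.h>

enum { NMI_NOT_RUNNING = 0, NMI_EXECUTING = 1, NMI_LATCHED = 2 };

static int nmi_state;		/* a per-CPU variable in the real code */
static int work_done;

static void nmi_work(void)
{
	printf("running NMI handlers (pass %d)\n", ++work_done);
}

static void model_do_nmi(void)
{
	/* A nested NMI only records that it happened and returns. */
	if (nmi_state != NMI_NOT_RUNNING) {
		nmi_state = NMI_LATCHED;
		return;
	}
	nmi_state = NMI_EXECUTING;
restart:
	nmi_work();

	/* Pretend a second NMI arrives while the first is being handled. */
	if (work_done == 1)
		model_do_nmi();

	/*
	 * Mirrors this_cpu_dec_return(nmi_state): LATCHED (2) decays to
	 * EXECUTING (1) and the handler runs again; EXECUTING decays to
	 * NOT_RUNNING (0) and we are done.
	 */
	if (--nmi_state)
		goto restart;
}

int main(void)
{
	model_do_nmi();
	printf("handler passes: %d\n", work_done);	/* expect 2 */
	return 0;
}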
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 9cad694ed7c4..c27cad726765 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -81,7 +81,7 @@ EXPORT_SYMBOL_GPL(idle_notifier_unregister);
81 */ 81 */
82int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) 82int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
83{ 83{
84 *dst = *src; 84 memcpy(dst, src, arch_task_struct_size);
85 85
86 return fpu__copy(&dst->thread.fpu, &src->thread.fpu); 86 return fpu__copy(&dst->thread.fpu, &src->thread.fpu);
87} 87}
@@ -408,6 +408,7 @@ static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
408static void mwait_idle(void) 408static void mwait_idle(void)
409{ 409{
410 if (!current_set_polling_and_test()) { 410 if (!current_set_polling_and_test()) {
411 trace_cpu_idle_rcuidle(1, smp_processor_id());
411 if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) { 412 if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
412 smp_mb(); /* quirk */ 413 smp_mb(); /* quirk */
413 clflush((void *)&current_thread_info()->flags); 414 clflush((void *)&current_thread_info()->flags);
@@ -419,6 +420,7 @@ static void mwait_idle(void)
419 __sti_mwait(0, 0); 420 __sti_mwait(0, 0);
420 else 421 else
421 local_irq_enable(); 422 local_irq_enable();
423 trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
422 } else { 424 } else {
423 local_irq_enable(); 425 local_irq_enable();
424 } 426 }
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 71d7849a07f7..f6b916387590 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -121,11 +121,11 @@ void __show_regs(struct pt_regs *regs, int all)
121void release_thread(struct task_struct *dead_task) 121void release_thread(struct task_struct *dead_task)
122{ 122{
123 if (dead_task->mm) { 123 if (dead_task->mm) {
124 if (dead_task->mm->context.size) { 124 if (dead_task->mm->context.ldt) {
125 pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n", 125 pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n",
126 dead_task->comm, 126 dead_task->comm,
127 dead_task->mm->context.ldt, 127 dead_task->mm->context.ldt,
128 dead_task->mm->context.size); 128 dead_task->mm->context.ldt->size);
129 BUG(); 129 BUG();
130 } 130 }
131 } 131 }
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 206996c1669d..71820c42b6ce 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -93,8 +93,15 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
93 COPY(r15); 93 COPY(r15);
94#endif /* CONFIG_X86_64 */ 94#endif /* CONFIG_X86_64 */
95 95
96#ifdef CONFIG_X86_32
96 COPY_SEG_CPL3(cs); 97 COPY_SEG_CPL3(cs);
97 COPY_SEG_CPL3(ss); 98 COPY_SEG_CPL3(ss);
99#else /* !CONFIG_X86_32 */
100 /* Kernel saves and restores only the CS segment register on signals,
101 * which is the bare minimum needed to allow mixed 32/64-bit code.
102 * App's signal handler can save/restore other segments if needed. */
103 COPY_SEG_CPL3(cs);
104#endif /* CONFIG_X86_32 */
98 105
99 get_user_ex(tmpflags, &sc->flags); 106 get_user_ex(tmpflags, &sc->flags);
100 regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS); 107 regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
@@ -154,9 +161,8 @@ int setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
154#else /* !CONFIG_X86_32 */ 161#else /* !CONFIG_X86_32 */
155 put_user_ex(regs->flags, &sc->flags); 162 put_user_ex(regs->flags, &sc->flags);
156 put_user_ex(regs->cs, &sc->cs); 163 put_user_ex(regs->cs, &sc->cs);
157 put_user_ex(0, &sc->__pad2); 164 put_user_ex(0, &sc->gs);
158 put_user_ex(0, &sc->__pad1); 165 put_user_ex(0, &sc->fs);
159 put_user_ex(regs->ss, &sc->ss);
160#endif /* CONFIG_X86_32 */ 166#endif /* CONFIG_X86_32 */
161 167
162 put_user_ex(fpstate, &sc->fpstate); 168 put_user_ex(fpstate, &sc->fpstate);
@@ -451,19 +457,9 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
451 457
452 regs->sp = (unsigned long)frame; 458 regs->sp = (unsigned long)frame;
453 459
454 /* 460 /* Set up the CS register to run signal handlers in 64-bit mode,
455 * Set up the CS and SS registers to run signal handlers in 461 even if the handler happens to be interrupting 32-bit code. */
456 * 64-bit mode, even if the handler happens to be interrupting
457 * 32-bit or 16-bit code.
458 *
459 * SS is subtle. In 64-bit mode, we don't need any particular
460 * SS descriptor, but we do need SS to be valid. It's possible
461 * that the old SS is entirely bogus -- this can happen if the
462 * signal we're trying to deliver is #GP or #SS caused by a bad
463 * SS value.
464 */
465 regs->cs = __USER_CS; 462 regs->cs = __USER_CS;
466 regs->ss = __USER_DS;
467 463
468 return 0; 464 return 0;
469} 465}
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index d3010aa79daf..b1f3ed9c7a9e 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -992,8 +992,17 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
992 992
993 common_cpu_up(cpu, tidle); 993 common_cpu_up(cpu, tidle);
994 994
995 /*
996 * We have to walk the irq descriptors to setup the vector
997 * space for the cpu which comes online. Prevent irq
998 * alloc/free across the bringup.
999 */
1000 irq_lock_sparse();
1001
995 err = do_boot_cpu(apicid, cpu, tidle); 1002 err = do_boot_cpu(apicid, cpu, tidle);
1003
996 if (err) { 1004 if (err) {
1005 irq_unlock_sparse();
997 pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu); 1006 pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu);
998 return -EIO; 1007 return -EIO;
999 } 1008 }
@@ -1011,6 +1020,8 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
1011 touch_nmi_watchdog(); 1020 touch_nmi_watchdog();
1012 } 1021 }
1013 1022
1023 irq_unlock_sparse();
1024
1014 return 0; 1025 return 0;
1015} 1026}
1016 1027
diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
index 9b4d51d0c0d0..0ccb53a9fcd9 100644
--- a/arch/x86/kernel/step.c
+++ b/arch/x86/kernel/step.c
@@ -5,6 +5,7 @@
5#include <linux/mm.h> 5#include <linux/mm.h>
6#include <linux/ptrace.h> 6#include <linux/ptrace.h>
7#include <asm/desc.h> 7#include <asm/desc.h>
8#include <asm/mmu_context.h>
8 9
9unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs) 10unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs)
10{ 11{
@@ -27,13 +28,14 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
27 struct desc_struct *desc; 28 struct desc_struct *desc;
28 unsigned long base; 29 unsigned long base;
29 30
30 seg &= ~7UL; 31 seg >>= 3;
31 32
32 mutex_lock(&child->mm->context.lock); 33 mutex_lock(&child->mm->context.lock);
33 if (unlikely((seg >> 3) >= child->mm->context.size)) 34 if (unlikely(!child->mm->context.ldt ||
35 seg >= child->mm->context.ldt->size))
34 addr = -1L; /* bogus selector, access would fault */ 36 addr = -1L; /* bogus selector, access would fault */
35 else { 37 else {
36 desc = child->mm->context.ldt + seg; 38 desc = &child->mm->context.ldt->entries[seg];
37 base = get_desc_base(desc); 39 base = get_desc_base(desc);
38 40
39 /* 16-bit code segment? */ 41 /* 16-bit code segment? */
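Illustrative note: several hunks in this series (release_thread(), convert_ip_to_linear() above, and the math-emu helper further down) switch from the old mm->context.ldt array plus context.size pair to a single pointer that is NULL when no LDT is installed. The sketch below shows the resulting access pattern; the struct layout and names are assumptions made for the example, not the kernel's definitions.

#include <stdio.h>
#include <stdint.h>

struct desc_struct { uint32_t a, b; };

/* Assumed shape of the new indirection: entries plus a size, reached
 * through one pointer that may be NULL. */
struct ldt_struct {
	struct desc_struct	*entries;
	unsigned int		size;
};

struct mm_context { struct ldt_struct *ldt; };

/* Same bounds check the patched convert_ip_to_linear() does: strip the
 * RPL/TI bits from the selector, then require an installed LDT and an
 * in-range index. */
static struct desc_struct *ldt_lookup(struct mm_context *ctx, unsigned int seg)
{
	unsigned int idx = seg >> 3;

	if (!ctx->ldt || idx >= ctx->ldt->size)
		return NULL;
	return &ctx->ldt->entries[idx];
}

int main(void)
{
	struct desc_struct descs[2] = { { 0x1234, 0x5678 }, { 0, 0 } };
	struct ldt_struct ldt = { descs, 2 };
	struct mm_context with_ldt = { &ldt }, without_ldt = { NULL };

	printf("selector 0x0f -> %p\n", (void *)ldt_lookup(&with_ldt, 0x0f));
	printf("selector 0x7f -> %p\n", (void *)ldt_lookup(&with_ldt, 0x7f));
	printf("no LDT        -> %p\n", (void *)ldt_lookup(&without_ldt, 0x0f));
	return 0;
}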
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 64dd46793099..2fbea2544f24 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -98,6 +98,8 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
98 best->ebx = xstate_required_size(vcpu->arch.xcr0, true); 98 best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
99 99
100 vcpu->arch.eager_fpu = use_eager_fpu() || guest_cpuid_has_mpx(vcpu); 100 vcpu->arch.eager_fpu = use_eager_fpu() || guest_cpuid_has_mpx(vcpu);
101 if (vcpu->arch.eager_fpu)
102 kvm_x86_ops->fpu_activate(vcpu);
101 103
102 /* 104 /*
103 * The existing code assumes virtual address is 48-bit in the canonical 105 * The existing code assumes virtual address is 48-bit in the canonical
diff --git a/arch/x86/kvm/iommu.c b/arch/x86/kvm/iommu.c
index 7dbced309ddb..5c520ebf6343 100644
--- a/arch/x86/kvm/iommu.c
+++ b/arch/x86/kvm/iommu.c
@@ -200,6 +200,7 @@ int kvm_assign_device(struct kvm *kvm, struct pci_dev *pdev)
200 goto out_unmap; 200 goto out_unmap;
201 } 201 }
202 202
203 kvm_arch_start_assignment(kvm);
203 pci_set_dev_assigned(pdev); 204 pci_set_dev_assigned(pdev);
204 205
205 dev_info(&pdev->dev, "kvm assign device\n"); 206 dev_info(&pdev->dev, "kvm assign device\n");
@@ -224,6 +225,7 @@ int kvm_deassign_device(struct kvm *kvm, struct pci_dev *pdev)
224 iommu_detach_device(domain, &pdev->dev); 225 iommu_detach_device(domain, &pdev->dev);
225 226
226 pci_clear_dev_assigned(pdev); 227 pci_clear_dev_assigned(pdev);
228 kvm_arch_end_assignment(kvm);
227 229
228 dev_info(&pdev->dev, "kvm deassign device\n"); 230 dev_info(&pdev->dev, "kvm deassign device\n");
229 231
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 954e98a8c2e3..2a5ca97c263b 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1595,7 +1595,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
1595 for (i = 0; i < APIC_LVT_NUM; i++) 1595 for (i = 0; i < APIC_LVT_NUM; i++)
1596 apic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED); 1596 apic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
1597 apic_update_lvtt(apic); 1597 apic_update_lvtt(apic);
1598 if (!(vcpu->kvm->arch.disabled_quirks & KVM_QUIRK_LINT0_REENABLED)) 1598 if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
1599 apic_set_reg(apic, APIC_LVT0, 1599 apic_set_reg(apic, APIC_LVT0,
1600 SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT)); 1600 SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
1601 apic_manage_nmi_watchdog(apic, kvm_apic_get_reg(apic, APIC_LVT0)); 1601 apic_manage_nmi_watchdog(apic, kvm_apic_get_reg(apic, APIC_LVT0));
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index f807496b62c2..44171462bd2a 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2479,6 +2479,14 @@ static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
2479 return 0; 2479 return 0;
2480} 2480}
2481 2481
2482static bool kvm_is_mmio_pfn(pfn_t pfn)
2483{
2484 if (pfn_valid(pfn))
2485 return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn));
2486
2487 return true;
2488}
2489
2482static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, 2490static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
2483 unsigned pte_access, int level, 2491 unsigned pte_access, int level,
2484 gfn_t gfn, pfn_t pfn, bool speculative, 2492 gfn_t gfn, pfn_t pfn, bool speculative,
@@ -2506,7 +2514,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
2506 spte |= PT_PAGE_SIZE_MASK; 2514 spte |= PT_PAGE_SIZE_MASK;
2507 if (tdp_enabled) 2515 if (tdp_enabled)
2508 spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn, 2516 spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
2509 kvm_is_reserved_pfn(pfn)); 2517 kvm_is_mmio_pfn(pfn));
2510 2518
2511 if (host_writable) 2519 if (host_writable)
2512 spte |= SPTE_HOST_WRITEABLE; 2520 spte |= SPTE_HOST_WRITEABLE;
diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c
index de1d2d8062e2..9e8bf13572e6 100644
--- a/arch/x86/kvm/mtrr.c
+++ b/arch/x86/kvm/mtrr.c
@@ -120,6 +120,16 @@ static u8 mtrr_default_type(struct kvm_mtrr *mtrr_state)
120 return mtrr_state->deftype & IA32_MTRR_DEF_TYPE_TYPE_MASK; 120 return mtrr_state->deftype & IA32_MTRR_DEF_TYPE_TYPE_MASK;
121} 121}
122 122
123static u8 mtrr_disabled_type(void)
124{
125 /*
126 * Intel SDM 11.11.2.2: all MTRRs are disabled when
127 * IA32_MTRR_DEF_TYPE.E bit is cleared, and the UC
128 * memory type is applied to all of physical memory.
129 */
130 return MTRR_TYPE_UNCACHABLE;
131}
132
123/* 133/*
124* Three terms are used in the following code: 134* Three terms are used in the following code:
125* - segment, it indicates the address segments covered by fixed MTRRs. 135* - segment, it indicates the address segments covered by fixed MTRRs.
@@ -434,6 +444,8 @@ struct mtrr_iter {
434 444
435 /* output fields. */ 445 /* output fields. */
436 int mem_type; 446 int mem_type;
447 /* mtrr is completely disabled? */
448 bool mtrr_disabled;
437 /* [start, end) is not fully covered in MTRRs? */ 449 /* [start, end) is not fully covered in MTRRs? */
438 bool partial_map; 450 bool partial_map;
439 451
@@ -549,7 +561,7 @@ static void mtrr_lookup_var_next(struct mtrr_iter *iter)
549static void mtrr_lookup_start(struct mtrr_iter *iter) 561static void mtrr_lookup_start(struct mtrr_iter *iter)
550{ 562{
551 if (!mtrr_is_enabled(iter->mtrr_state)) { 563 if (!mtrr_is_enabled(iter->mtrr_state)) {
552 iter->partial_map = true; 564 iter->mtrr_disabled = true;
553 return; 565 return;
554 } 566 }
555 567
@@ -563,6 +575,7 @@ static void mtrr_lookup_init(struct mtrr_iter *iter,
563 iter->mtrr_state = mtrr_state; 575 iter->mtrr_state = mtrr_state;
564 iter->start = start; 576 iter->start = start;
565 iter->end = end; 577 iter->end = end;
578 iter->mtrr_disabled = false;
566 iter->partial_map = false; 579 iter->partial_map = false;
567 iter->fixed = false; 580 iter->fixed = false;
568 iter->range = NULL; 581 iter->range = NULL;
@@ -656,15 +669,19 @@ u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
656 return MTRR_TYPE_WRBACK; 669 return MTRR_TYPE_WRBACK;
657 } 670 }
658 671
659 /* It is not covered by MTRRs. */ 672 if (iter.mtrr_disabled)
660 if (iter.partial_map) { 673 return mtrr_disabled_type();
661 /* 674
662 * We just check one page, partially covered by MTRRs is 675 /* not contained in any MTRRs. */
663 * impossible. 676 if (type == -1)
664 */ 677 return mtrr_default_type(mtrr_state);
665 WARN_ON(type != -1); 678
666 type = mtrr_default_type(mtrr_state); 679 /*
667 } 680 * We just check one page, partially covered by MTRRs is
681 * impossible.
682 */
683 WARN_ON(iter.partial_map);
684
668 return type; 685 return type;
669} 686}
670EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type); 687EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type);
@@ -689,6 +706,9 @@ bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
689 return false; 706 return false;
690 } 707 }
691 708
709 if (iter.mtrr_disabled)
710 return true;
711
692 if (!iter.partial_map) 712 if (!iter.partial_map)
693 return true; 713 return true;
694 714
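Illustrative note: the mtrr.c hunks above change the decision order for a single guest page to: all-UC when MTRRs are globally disabled, the default type when no range matches, and the matched range type otherwise (partial coverage of one page being impossible). The stand-alone sketch below models only that ordering; the iterator and range lookup are stubbed out, and only the numeric type values follow the x86 MTRR encoding.

#include <stdio.h>

#define MTRR_TYPE_UNCACHABLE	0
#define MTRR_TYPE_WRBACK	6

struct lookup_result {
	int mtrr_disabled;	/* IA32_MTRR_DEF_TYPE.E is clear */
	int covered;		/* the page hit at least one range */
	int range_type;		/* type reported by the matching range(s) */
	int default_type;	/* low byte of IA32_MTRR_DEF_TYPE */
};

static int guest_page_type(const struct lookup_result *r)
{
	if (r->mtrr_disabled)
		return MTRR_TYPE_UNCACHABLE;	/* SDM 11.11.2.2: everything UC */
	if (!r->covered)
		return r->default_type;		/* fall back to the default type */
	return r->range_type;			/* one page cannot be partially covered */
}

int main(void)
{
	struct lookup_result disabled  = { .mtrr_disabled = 1 };
	struct lookup_result uncovered = { .default_type = MTRR_TYPE_WRBACK };
	struct lookup_result covered   = { .covered = 1, .range_type = MTRR_TYPE_WRBACK };

	printf("disabled=%d uncovered=%d covered=%d\n",
	       guest_page_type(&disabled),
	       guest_page_type(&uncovered),
	       guest_page_type(&covered));
	return 0;
}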
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 602b974a60a6..8e0c0844c6b9 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -865,6 +865,64 @@ static void svm_disable_lbrv(struct vcpu_svm *svm)
865 set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0); 865 set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
866} 866}
867 867
868#define MTRR_TYPE_UC_MINUS 7
869#define MTRR2PROTVAL_INVALID 0xff
870
871static u8 mtrr2protval[8];
872
873static u8 fallback_mtrr_type(int mtrr)
874{
875 /*
876 * WT and WP aren't always available in the host PAT. Treat
877 * them as UC and UC- respectively. Everything else should be
878 * there.
879 */
880 switch (mtrr)
881 {
882 case MTRR_TYPE_WRTHROUGH:
883 return MTRR_TYPE_UNCACHABLE;
884 case MTRR_TYPE_WRPROT:
885 return MTRR_TYPE_UC_MINUS;
886 default:
887 BUG();
888 }
889}
890
891static void build_mtrr2protval(void)
892{
893 int i;
894 u64 pat;
895
896 for (i = 0; i < 8; i++)
897 mtrr2protval[i] = MTRR2PROTVAL_INVALID;
898
899 /* Ignore the invalid MTRR types. */
900 mtrr2protval[2] = 0;
901 mtrr2protval[3] = 0;
902
903 /*
904 * Use host PAT value to figure out the mapping from guest MTRR
905 * values to nested page table PAT/PCD/PWT values. We do not
906 * want to change the host PAT value every time we enter the
907 * guest.
908 */
909 rdmsrl(MSR_IA32_CR_PAT, pat);
910 for (i = 0; i < 8; i++) {
911 u8 mtrr = pat >> (8 * i);
912
913 if (mtrr2protval[mtrr] == MTRR2PROTVAL_INVALID)
914 mtrr2protval[mtrr] = __cm_idx2pte(i);
915 }
916
917 for (i = 0; i < 8; i++) {
918 if (mtrr2protval[i] == MTRR2PROTVAL_INVALID) {
919 u8 fallback = fallback_mtrr_type(i);
920 mtrr2protval[i] = mtrr2protval[fallback];
921 BUG_ON(mtrr2protval[i] == MTRR2PROTVAL_INVALID);
922 }
923 }
924}
925
868static __init int svm_hardware_setup(void) 926static __init int svm_hardware_setup(void)
869{ 927{
870 int cpu; 928 int cpu;
@@ -931,6 +989,7 @@ static __init int svm_hardware_setup(void)
931 } else 989 } else
932 kvm_disable_tdp(); 990 kvm_disable_tdp();
933 991
992 build_mtrr2protval();
934 return 0; 993 return 0;
935 994
936err: 995err:
@@ -1085,6 +1144,39 @@ static u64 svm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
1085 return target_tsc - tsc; 1144 return target_tsc - tsc;
1086} 1145}
1087 1146
1147static void svm_set_guest_pat(struct vcpu_svm *svm, u64 *g_pat)
1148{
1149 struct kvm_vcpu *vcpu = &svm->vcpu;
1150
1151 /* Unlike Intel, AMD takes the guest's CR0.CD into account.
1152 *
1153 * AMD does not have IPAT. To emulate it for the case of guests
1154 * with no assigned devices, just set everything to WB. If guests
1155 * have assigned devices, however, we cannot force WB for RAM
1156 * pages only, so use the guest PAT directly.
1157 */
1158 if (!kvm_arch_has_assigned_device(vcpu->kvm))
1159 *g_pat = 0x0606060606060606;
1160 else
1161 *g_pat = vcpu->arch.pat;
1162}
1163
1164static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
1165{
1166 u8 mtrr;
1167
1168 /*
1169 * 1. MMIO: trust guest MTRR, so same as item 3.
1170 * 2. No passthrough: always map as WB, and force guest PAT to WB as well
1171 * 3. Passthrough: can't guarantee the result, try to trust guest.
1172 */
1173 if (!is_mmio && !kvm_arch_has_assigned_device(vcpu->kvm))
1174 return 0;
1175
1176 mtrr = kvm_mtrr_get_guest_memory_type(vcpu, gfn);
1177 return mtrr2protval[mtrr];
1178}
1179
1088static void init_vmcb(struct vcpu_svm *svm, bool init_event) 1180static void init_vmcb(struct vcpu_svm *svm, bool init_event)
1089{ 1181{
1090 struct vmcb_control_area *control = &svm->vmcb->control; 1182 struct vmcb_control_area *control = &svm->vmcb->control;
@@ -1180,6 +1272,7 @@ static void init_vmcb(struct vcpu_svm *svm, bool init_event)
1180 clr_cr_intercept(svm, INTERCEPT_CR3_READ); 1272 clr_cr_intercept(svm, INTERCEPT_CR3_READ);
1181 clr_cr_intercept(svm, INTERCEPT_CR3_WRITE); 1273 clr_cr_intercept(svm, INTERCEPT_CR3_WRITE);
1182 save->g_pat = svm->vcpu.arch.pat; 1274 save->g_pat = svm->vcpu.arch.pat;
1275 svm_set_guest_pat(svm, &save->g_pat);
1183 save->cr3 = 0; 1276 save->cr3 = 0;
1184 save->cr4 = 0; 1277 save->cr4 = 0;
1185 } 1278 }
@@ -1579,7 +1672,7 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
1579 * does not do it - this results in some delay at 1672 * does not do it - this results in some delay at
1580 * reboot 1673 * reboot
1581 */ 1674 */
1582 if (!(vcpu->kvm->arch.disabled_quirks & KVM_QUIRK_CD_NW_CLEARED)) 1675 if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
1583 cr0 &= ~(X86_CR0_CD | X86_CR0_NW); 1676 cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
1584 svm->vmcb->save.cr0 = cr0; 1677 svm->vmcb->save.cr0 = cr0;
1585 mark_dirty(svm->vmcb, VMCB_CR); 1678 mark_dirty(svm->vmcb, VMCB_CR);
@@ -3254,6 +3347,16 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
3254 case MSR_VM_IGNNE: 3347 case MSR_VM_IGNNE:
3255 vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data); 3348 vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
3256 break; 3349 break;
3350 case MSR_IA32_CR_PAT:
3351 if (npt_enabled) {
3352 if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
3353 return 1;
3354 vcpu->arch.pat = data;
3355 svm_set_guest_pat(svm, &svm->vmcb->save.g_pat);
3356 mark_dirty(svm->vmcb, VMCB_NPT);
3357 break;
3358 }
3359 /* fall through */
3257 default: 3360 default:
3258 return kvm_set_msr_common(vcpu, msr); 3361 return kvm_set_msr_common(vcpu, msr);
3259 } 3362 }
@@ -4088,11 +4191,6 @@ static bool svm_has_high_real_mode_segbase(void)
4088 return true; 4191 return true;
4089} 4192}
4090 4193
4091static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
4092{
4093 return 0;
4094}
4095
4096static void svm_cpuid_update(struct kvm_vcpu *vcpu) 4194static void svm_cpuid_update(struct kvm_vcpu *vcpu)
4097{ 4195{
4098} 4196}
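Illustrative note: build_mtrr2protval() above derives, once at hardware-setup time, a table from guest MTRR memory type to nested-page-table PAT/PCD/PWT bits by scanning the host's PAT MSR. The stand-alone sketch below reproduces only that scan: the PAT value is hard-coded as an assumption, the encoding step (__cm_idx2pte) is reduced to the bare slot index, and the WT/WP fallback handling from the patch is left out.

#include <stdio.h>
#include <stdint.h>

#define NR_MEM_TYPES	8
#define SLOT_INVALID	0xff

/* A plausible host PAT layout (WB, WC, UC-, UC repeated); the real code
 * reads MSR_IA32_CR_PAT instead of hard-coding a value. */
static const uint64_t host_pat = 0x0007010600070106ULL;

static uint8_t type_to_pat_slot[NR_MEM_TYPES];

static void build_table(void)
{
	int i;

	for (i = 0; i < NR_MEM_TYPES; i++)
		type_to_pat_slot[i] = SLOT_INVALID;

	/* The first PAT slot programmed with a given memory type wins; the
	 * slot index is what would later be folded into the NPT PTE bits. */
	for (i = 0; i < 8; i++) {
		uint8_t type = (host_pat >> (8 * i)) & 0xff;

		if (type < NR_MEM_TYPES && type_to_pat_slot[type] == SLOT_INVALID)
			type_to_pat_slot[type] = (uint8_t)i;
	}
}

int main(void)
{
	int i;

	build_table();
	for (i = 0; i < NR_MEM_TYPES; i++)
		if (type_to_pat_slot[i] != SLOT_INVALID)
			printf("guest MTRR type %d -> host PAT slot %d\n",
			       i, type_to_pat_slot[i]);
	return 0;
}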
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index e856dd566f4c..83b7b5cd75d5 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -8632,22 +8632,17 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
8632 u64 ipat = 0; 8632 u64 ipat = 0;
8633 8633
8634 /* For VT-d and EPT combination 8634 /* For VT-d and EPT combination
8635 * 1. MMIO: always map as UC 8635 * 1. MMIO: guest may want to apply WC, trust it.
8636 * 2. EPT with VT-d: 8636 * 2. EPT with VT-d:
8637 * a. VT-d without snooping control feature: can't guarantee the 8637 * a. VT-d without snooping control feature: can't guarantee the
8638 * result, try to trust guest. 8638 * result, try to trust guest. So the same as item 1.
8639 * b. VT-d with snooping control feature: snooping control feature of 8639 * b. VT-d with snooping control feature: snooping control feature of
8640 * VT-d engine can guarantee the cache correctness. Just set it 8640 * VT-d engine can guarantee the cache correctness. Just set it
8641 * to WB to keep consistent with host. So the same as item 3. 8641 * to WB to keep consistent with host. So the same as item 3.
8642 * 3. EPT without VT-d: always map as WB and set IPAT=1 to keep 8642 * 3. EPT without VT-d: always map as WB and set IPAT=1 to keep
8643 * consistent with host MTRR 8643 * consistent with host MTRR
8644 */ 8644 */
8645 if (is_mmio) { 8645 if (!is_mmio && !kvm_arch_has_noncoherent_dma(vcpu->kvm)) {
8646 cache = MTRR_TYPE_UNCACHABLE;
8647 goto exit;
8648 }
8649
8650 if (!kvm_arch_has_noncoherent_dma(vcpu->kvm)) {
8651 ipat = VMX_EPT_IPAT_BIT; 8646 ipat = VMX_EPT_IPAT_BIT;
8652 cache = MTRR_TYPE_WRBACK; 8647 cache = MTRR_TYPE_WRBACK;
8653 goto exit; 8648 goto exit;
@@ -8655,7 +8650,10 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
8655 8650
8656 if (kvm_read_cr0(vcpu) & X86_CR0_CD) { 8651 if (kvm_read_cr0(vcpu) & X86_CR0_CD) {
8657 ipat = VMX_EPT_IPAT_BIT; 8652 ipat = VMX_EPT_IPAT_BIT;
8658 cache = MTRR_TYPE_UNCACHABLE; 8653 if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
8654 cache = MTRR_TYPE_WRBACK;
8655 else
8656 cache = MTRR_TYPE_UNCACHABLE;
8659 goto exit; 8657 goto exit;
8660 } 8658 }
8661 8659
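Illustrative note: with the hunk above, vmx_get_mt_mask() no longer forces MMIO to UC and honours the CD/NW quirk when CR0.CD is set. The sketch below models just that decision order, with plain integers standing in for the EPT memory-type/IPAT encoding; the struct and field names are invented for the example.

#include <stdio.h>

#define MT_UC	0
#define MT_WB	6

struct fault_ctx {
	int is_mmio;
	int has_noncoherent_dma;	/* e.g. VT-d without snooping control */
	int cr0_cd;
	int cd_nw_quirk_active;		/* CD/NW quirk not disabled by userspace */
	int guest_mtrr_type;		/* what the MTRR lookup would return */
};

static int ept_memtype(const struct fault_ctx *c, int *ipat)
{
	*ipat = 0;

	/* No MMIO and no non-coherent DMA: force WB and ignore guest PAT. */
	if (!c->is_mmio && !c->has_noncoherent_dma) {
		*ipat = 1;
		return MT_WB;
	}

	/* CR0.CD set: UC unless the quirk keeps the old WB behaviour. */
	if (c->cr0_cd) {
		*ipat = 1;
		return c->cd_nw_quirk_active ? MT_WB : MT_UC;
	}

	/* Otherwise trust the guest's MTRRs (MMIO included, after this patch). */
	return c->guest_mtrr_type;
}

int main(void)
{
	struct fault_ctx mmio_wc = { .is_mmio = 1, .guest_mtrr_type = 1 /* WC */ };
	int ipat;

	printf("MMIO page -> type %d, ipat=%d\n", ept_memtype(&mmio_wc, &ipat), ipat);
	return 0;
}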
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index bbaf44e8f0d3..8f0f6eca69da 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2105,7 +2105,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2105 if (guest_cpuid_has_tsc_adjust(vcpu)) { 2105 if (guest_cpuid_has_tsc_adjust(vcpu)) {
2106 if (!msr_info->host_initiated) { 2106 if (!msr_info->host_initiated) {
2107 s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr; 2107 s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
2108 kvm_x86_ops->adjust_tsc_offset(vcpu, adj, true); 2108 adjust_tsc_offset_guest(vcpu, adj);
2109 } 2109 }
2110 vcpu->arch.ia32_tsc_adjust_msr = data; 2110 vcpu->arch.ia32_tsc_adjust_msr = data;
2111 } 2111 }
@@ -3157,8 +3157,7 @@ static void load_xsave(struct kvm_vcpu *vcpu, u8 *src)
3157 cpuid_count(XSTATE_CPUID, index, 3157 cpuid_count(XSTATE_CPUID, index,
3158 &size, &offset, &ecx, &edx); 3158 &size, &offset, &ecx, &edx);
3159 memcpy(dest, src + offset, size); 3159 memcpy(dest, src + offset, size);
3160 } else 3160 }
3161 WARN_ON_ONCE(1);
3162 3161
3163 valid -= feature; 3162 valid -= feature;
3164 } 3163 }
@@ -6328,6 +6327,7 @@ static void process_smi_save_state_64(struct kvm_vcpu *vcpu, char *buf)
6328static void process_smi(struct kvm_vcpu *vcpu) 6327static void process_smi(struct kvm_vcpu *vcpu)
6329{ 6328{
6330 struct kvm_segment cs, ds; 6329 struct kvm_segment cs, ds;
6330 struct desc_ptr dt;
6331 char buf[512]; 6331 char buf[512];
6332 u32 cr0; 6332 u32 cr0;
6333 6333
@@ -6360,6 +6360,10 @@ static void process_smi(struct kvm_vcpu *vcpu)
6360 6360
6361 kvm_x86_ops->set_cr4(vcpu, 0); 6361 kvm_x86_ops->set_cr4(vcpu, 0);
6362 6362
6363 /* Undocumented: IDT limit is set to zero on entry to SMM. */
6364 dt.address = dt.size = 0;
6365 kvm_x86_ops->set_idt(vcpu, &dt);
6366
6363 __kvm_set_dr(vcpu, 7, DR7_FIXED_1); 6367 __kvm_set_dr(vcpu, 7, DR7_FIXED_1);
6364 6368
6365 cs.selector = (vcpu->arch.smbase >> 4) & 0xffff; 6369 cs.selector = (vcpu->arch.smbase >> 4) & 0xffff;
@@ -7315,11 +7319,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
7315 7319
7316 vcpu = kvm_x86_ops->vcpu_create(kvm, id); 7320 vcpu = kvm_x86_ops->vcpu_create(kvm, id);
7317 7321
7318 /*
7319 * Activate fpu unconditionally in case the guest needs eager FPU. It will be
7320 * deactivated soon if it doesn't.
7321 */
7322 kvm_x86_ops->fpu_activate(vcpu);
7323 return vcpu; 7322 return vcpu;
7324} 7323}
7325 7324
@@ -8218,6 +8217,24 @@ bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
8218 kvm_x86_ops->interrupt_allowed(vcpu); 8217 kvm_x86_ops->interrupt_allowed(vcpu);
8219} 8218}
8220 8219
8220void kvm_arch_start_assignment(struct kvm *kvm)
8221{
8222 atomic_inc(&kvm->arch.assigned_device_count);
8223}
8224EXPORT_SYMBOL_GPL(kvm_arch_start_assignment);
8225
8226void kvm_arch_end_assignment(struct kvm *kvm)
8227{
8228 atomic_dec(&kvm->arch.assigned_device_count);
8229}
8230EXPORT_SYMBOL_GPL(kvm_arch_end_assignment);
8231
8232bool kvm_arch_has_assigned_device(struct kvm *kvm)
8233{
8234 return atomic_read(&kvm->arch.assigned_device_count);
8235}
8236EXPORT_SYMBOL_GPL(kvm_arch_has_assigned_device);
8237
8221void kvm_arch_register_noncoherent_dma(struct kvm *kvm) 8238void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
8222{ 8239{
8223 atomic_inc(&kvm->arch.noncoherent_dma_count); 8240 atomic_inc(&kvm->arch.noncoherent_dma_count);
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index edc8cdcd786b..0ca2f3e4803c 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -147,6 +147,11 @@ static inline void kvm_register_writel(struct kvm_vcpu *vcpu,
147 return kvm_register_write(vcpu, reg, val); 147 return kvm_register_write(vcpu, reg, val);
148} 148}
149 149
150static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
151{
152 return !(kvm->arch.disabled_quirks & quirk);
153}
154
150void kvm_before_handle_nmi(struct kvm_vcpu *vcpu); 155void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
151void kvm_after_handle_nmi(struct kvm_vcpu *vcpu); 156void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
152void kvm_set_pending_timer(struct kvm_vcpu *vcpu); 157void kvm_set_pending_timer(struct kvm_vcpu *vcpu);
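Illustrative note: kvm_check_has_quirk() above inverts the sense of the old open-coded tests seen earlier in lapic.c and svm.c; callers now ask whether a quirk is still active rather than whether it has been disabled. A minimal stand-alone version of the bit test, with made-up quirk bit values:

#include <stdio.h>
#include <stdint.h>

#define QUIRK_LINT0_REENABLED	(1u << 0)	/* illustrative bit positions only */
#define QUIRK_CD_NW_CLEARED	(1u << 1)

struct toy_kvm { uint64_t disabled_quirks; };

static int check_has_quirk(const struct toy_kvm *kvm, uint64_t quirk)
{
	return !(kvm->disabled_quirks & quirk);
}

int main(void)
{
	struct toy_kvm kvm = { .disabled_quirks = QUIRK_CD_NW_CLEARED };

	printf("LINT0 quirk active: %d\n", check_has_quirk(&kvm, QUIRK_LINT0_REENABLED));
	printf("CD/NW quirk active: %d\n", check_has_quirk(&kvm, QUIRK_CD_NW_CLEARED));
	return 0;
}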
diff --git a/arch/x86/math-emu/fpu_entry.c b/arch/x86/math-emu/fpu_entry.c
index f37e84ab49f3..3d8f2e421466 100644
--- a/arch/x86/math-emu/fpu_entry.c
+++ b/arch/x86/math-emu/fpu_entry.c
@@ -29,7 +29,6 @@
29 29
30#include <asm/uaccess.h> 30#include <asm/uaccess.h>
31#include <asm/traps.h> 31#include <asm/traps.h>
32#include <asm/desc.h>
33#include <asm/user.h> 32#include <asm/user.h>
34#include <asm/fpu/internal.h> 33#include <asm/fpu/internal.h>
35 34
@@ -181,7 +180,7 @@ void math_emulate(struct math_emu_info *info)
181 math_abort(FPU_info, SIGILL); 180 math_abort(FPU_info, SIGILL);
182 } 181 }
183 182
184 code_descriptor = LDT_DESCRIPTOR(FPU_CS); 183 code_descriptor = FPU_get_ldt_descriptor(FPU_CS);
185 if (SEG_D_SIZE(code_descriptor)) { 184 if (SEG_D_SIZE(code_descriptor)) {
186 /* The above test may be wrong, the book is not clear */ 185 /* The above test may be wrong, the book is not clear */
187 /* Segmented 32 bit protected mode */ 186 /* Segmented 32 bit protected mode */
diff --git a/arch/x86/math-emu/fpu_system.h b/arch/x86/math-emu/fpu_system.h
index 9ccecb61a4fa..5e044d506b7a 100644
--- a/arch/x86/math-emu/fpu_system.h
+++ b/arch/x86/math-emu/fpu_system.h
@@ -16,9 +16,24 @@
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/mm.h> 17#include <linux/mm.h>
18 18
19/* s is always from a cpu register, and the cpu does bounds checking 19#include <asm/desc.h>
20 * during register load --> no further bounds checks needed */ 20#include <asm/mmu_context.h>
21#define LDT_DESCRIPTOR(s) (((struct desc_struct *)current->mm->context.ldt)[(s) >> 3]) 21
22static inline struct desc_struct FPU_get_ldt_descriptor(unsigned seg)
23{
24 static struct desc_struct zero_desc;
25 struct desc_struct ret = zero_desc;
26
27#ifdef CONFIG_MODIFY_LDT_SYSCALL
28 seg >>= 3;
29 mutex_lock(&current->mm->context.lock);
30 if (current->mm->context.ldt && seg < current->mm->context.ldt->size)
31 ret = current->mm->context.ldt->entries[seg];
32 mutex_unlock(&current->mm->context.lock);
33#endif
34 return ret;
35}
36
22#define SEG_D_SIZE(x) ((x).b & (3 << 21)) 37#define SEG_D_SIZE(x) ((x).b & (3 << 21))
23#define SEG_G_BIT(x) ((x).b & (1 << 23)) 38#define SEG_G_BIT(x) ((x).b & (1 << 23))
24#define SEG_GRANULARITY(x) (((x).b & (1 << 23)) ? 4096 : 1) 39#define SEG_GRANULARITY(x) (((x).b & (1 << 23)) ? 4096 : 1)
diff --git a/arch/x86/math-emu/get_address.c b/arch/x86/math-emu/get_address.c
index 6ef5e99380f9..8300db71c2a6 100644
--- a/arch/x86/math-emu/get_address.c
+++ b/arch/x86/math-emu/get_address.c
@@ -20,7 +20,6 @@
20#include <linux/stddef.h> 20#include <linux/stddef.h>
21 21
22#include <asm/uaccess.h> 22#include <asm/uaccess.h>
23#include <asm/desc.h>
24 23
25#include "fpu_system.h" 24#include "fpu_system.h"
26#include "exception.h" 25#include "exception.h"
@@ -158,7 +157,7 @@ static long pm_address(u_char FPU_modrm, u_char segment,
158 addr->selector = PM_REG_(segment); 157 addr->selector = PM_REG_(segment);
159 } 158 }
160 159
161 descriptor = LDT_DESCRIPTOR(PM_REG_(segment)); 160 descriptor = FPU_get_ldt_descriptor(addr->selector);
162 base_address = SEG_BASE_ADDR(descriptor); 161 base_address = SEG_BASE_ADDR(descriptor);
163 address = base_address + offset; 162 address = base_address + offset;
164 limit = base_address 163 limit = base_address
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index cc5ccc415cc0..b9c78f3bcd67 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -63,8 +63,6 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
63 !PageReserved(pfn_to_page(start_pfn + i))) 63 !PageReserved(pfn_to_page(start_pfn + i)))
64 return 1; 64 return 1;
65 65
66 WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);
67
68 return 0; 66 return 0;
69} 67}
70 68
@@ -94,7 +92,6 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
94 pgprot_t prot; 92 pgprot_t prot;
95 int retval; 93 int retval;
96 void __iomem *ret_addr; 94 void __iomem *ret_addr;
97 int ram_region;
98 95
99 /* Don't allow wraparound or zero size */ 96 /* Don't allow wraparound or zero size */
100 last_addr = phys_addr + size - 1; 97 last_addr = phys_addr + size - 1;
@@ -117,23 +114,15 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
117 /* 114 /*
118 * Don't allow anybody to remap normal RAM that we're using.. 115 * Don't allow anybody to remap normal RAM that we're using..
119 */ 116 */
120 /* First check if whole region can be identified as RAM or not */ 117 pfn = phys_addr >> PAGE_SHIFT;
121 ram_region = region_is_ram(phys_addr, size); 118 last_pfn = last_addr >> PAGE_SHIFT;
122 if (ram_region > 0) { 119 if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
123 WARN_ONCE(1, "ioremap on RAM at 0x%lx - 0x%lx\n", 120 __ioremap_check_ram) == 1) {
124 (unsigned long int)phys_addr, 121 WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
125 (unsigned long int)last_addr); 122 &phys_addr, &last_addr);
126 return NULL; 123 return NULL;
127 } 124 }
128 125
129 /* If could not be identified(-1), check page by page */
130 if (ram_region < 0) {
131 pfn = phys_addr >> PAGE_SHIFT;
132 last_pfn = last_addr >> PAGE_SHIFT;
133 if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
134 __ioremap_check_ram) == 1)
135 return NULL;
136 }
137 /* 126 /*
138 * Mappings have to be page-aligned 127 * Mappings have to be page-aligned
139 */ 128 */
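Illustrative note: the hunk above drops the region_is_ram() fast path and always walks the requested range page by page, refusing the ioremap if any page is usable RAM. The toy sketch below models that per-page rejection; the static array stands in for walk_system_ram_range() and the pfn values are arbitrary.

#include <stdio.h>

#define NR_PFNS 16

/* 1 = usable RAM, 0 = MMIO/reserved; a stand-in for the resource tree. */
static const int pfn_is_ram[NR_PFNS] = { 1, 1, 1, 1, 1, 1, 0, 0,
					 0, 0, 0, 0, 0, 0, 0, 0 };

static int range_has_ram(unsigned int first_pfn, unsigned int last_pfn)
{
	unsigned int pfn;

	for (pfn = first_pfn; pfn <= last_pfn && pfn < NR_PFNS; pfn++)
		if (pfn_is_ram[pfn])
			return 1;
	return 0;
}

int main(void)
{
	printf("pfns 4..8:  %s\n", range_has_ram(4, 8) ? "rejected (RAM)" : "allowed");
	printf("pfns 9..12: %s\n", range_has_ram(9, 12) ? "rejected (RAM)" : "allowed");
	return 0;
}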
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index 9d518d693b4b..844b06d67df4 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -126,3 +126,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
126 mm->get_unmapped_area = arch_get_unmapped_area_topdown; 126 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
127 } 127 }
128} 128}
129
130const char *arch_vma_name(struct vm_area_struct *vma)
131{
132 if (vma->vm_flags & VM_MPX)
133 return "[mpx]";
134 return NULL;
135}
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
index 7a657f58bbea..db1b0bc5017c 100644
--- a/arch/x86/mm/mpx.c
+++ b/arch/x86/mm/mpx.c
@@ -20,20 +20,6 @@
20#define CREATE_TRACE_POINTS 20#define CREATE_TRACE_POINTS
21#include <asm/trace/mpx.h> 21#include <asm/trace/mpx.h>
22 22
23static const char *mpx_mapping_name(struct vm_area_struct *vma)
24{
25 return "[mpx]";
26}
27
28static struct vm_operations_struct mpx_vma_ops = {
29 .name = mpx_mapping_name,
30};
31
32static int is_mpx_vma(struct vm_area_struct *vma)
33{
34 return (vma->vm_ops == &mpx_vma_ops);
35}
36
37static inline unsigned long mpx_bd_size_bytes(struct mm_struct *mm) 23static inline unsigned long mpx_bd_size_bytes(struct mm_struct *mm)
38{ 24{
39 if (is_64bit_mm(mm)) 25 if (is_64bit_mm(mm))
@@ -53,9 +39,6 @@ static inline unsigned long mpx_bt_size_bytes(struct mm_struct *mm)
53/* 39/*
54 * This is really a simplified "vm_mmap". it only handles MPX 40 * This is really a simplified "vm_mmap". it only handles MPX
55 * bounds tables (the bounds directory is user-allocated). 41 * bounds tables (the bounds directory is user-allocated).
56 *
57 * Later on, we use the vma->vm_ops to uniquely identify these
58 * VMAs.
59 */ 42 */
60static unsigned long mpx_mmap(unsigned long len) 43static unsigned long mpx_mmap(unsigned long len)
61{ 44{
@@ -101,7 +84,6 @@ static unsigned long mpx_mmap(unsigned long len)
101 ret = -ENOMEM; 84 ret = -ENOMEM;
102 goto out; 85 goto out;
103 } 86 }
104 vma->vm_ops = &mpx_vma_ops;
105 87
106 if (vm_flags & VM_LOCKED) { 88 if (vm_flags & VM_LOCKED) {
107 up_write(&mm->mmap_sem); 89 up_write(&mm->mmap_sem);
@@ -812,7 +794,7 @@ static noinline int zap_bt_entries_mapping(struct mm_struct *mm,
812 * so stop immediately and return an error. This 794 * so stop immediately and return an error. This
813 * probably results in a SIGSEGV. 795 * probably results in a SIGSEGV.
814 */ 796 */
815 if (!is_mpx_vma(vma)) 797 if (!(vma->vm_flags & VM_MPX))
816 return -EINVAL; 798 return -EINVAL;
817 799
818 len = min(vma->vm_end, end) - addr; 800 len = min(vma->vm_end, end) - addr;
@@ -945,9 +927,9 @@ static int try_unmap_single_bt(struct mm_struct *mm,
945 * lots of tables even though we have no actual table 927 * lots of tables even though we have no actual table
946 * entries in use. 928 * entries in use.
947 */ 929 */
948 while (next && is_mpx_vma(next)) 930 while (next && (next->vm_flags & VM_MPX))
949 next = next->vm_next; 931 next = next->vm_next;
950 while (prev && is_mpx_vma(prev)) 932 while (prev && (prev->vm_flags & VM_MPX))
951 prev = prev->vm_prev; 933 prev = prev->vm_prev;
952 /* 934 /*
953 * We know 'start' and 'end' lie within an area controlled 935 * We know 'start' and 'end' lie within an area controlled
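Illustrative note: after the mmap.c and mpx.c hunks above, an MPX bounds-table VMA is identified by a VM_MPX bit in vm_flags (reported as "[mpx]" by arch_vma_name()) instead of by a private vm_ops pointer. A toy version of that test, with an invented flag value and a stand-in vma struct:

#include <stdio.h>

#define VM_READ		0x1UL
#define VM_WRITE	0x2UL
#define VM_MPX		0x2000UL	/* illustrative value only */

struct toy_vma { unsigned long vm_flags; };

static const char *toy_vma_name(const struct toy_vma *vma)
{
	return (vma->vm_flags & VM_MPX) ? "[mpx]" : NULL;
}

int main(void)
{
	struct toy_vma bounds_table = { VM_READ | VM_WRITE | VM_MPX };
	struct toy_vma ordinary     = { VM_READ | VM_WRITE };
	const char *name;

	name = toy_vma_name(&bounds_table);
	printf("bounds table: %s\n", name ? name : "(unnamed)");
	name = toy_vma_name(&ordinary);
	printf("ordinary vma: %s\n", name ? name : "(unnamed)");
	return 0;
}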
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 3250f2371aea..90b924acd982 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -117,7 +117,7 @@ static void flush_tlb_func(void *info)
117 } else { 117 } else {
118 unsigned long addr; 118 unsigned long addr;
119 unsigned long nr_pages = 119 unsigned long nr_pages =
120 f->flush_end - f->flush_start / PAGE_SIZE; 120 (f->flush_end - f->flush_start) / PAGE_SIZE;
121 addr = f->flush_start; 121 addr = f->flush_start;
122 while (addr < f->flush_end) { 122 while (addr < f->flush_end) {
123 __flush_tlb_single(addr); 123 __flush_tlb_single(addr);
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 579a8fd74be0..be2e7a2b10d7 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -269,7 +269,7 @@ static void emit_bpf_tail_call(u8 **pprog)
269 EMIT4(0x48, 0x8B, 0x46, /* mov rax, qword ptr [rsi + 16] */ 269 EMIT4(0x48, 0x8B, 0x46, /* mov rax, qword ptr [rsi + 16] */
270 offsetof(struct bpf_array, map.max_entries)); 270 offsetof(struct bpf_array, map.max_entries));
271 EMIT3(0x48, 0x39, 0xD0); /* cmp rax, rdx */ 271 EMIT3(0x48, 0x39, 0xD0); /* cmp rax, rdx */
272#define OFFSET1 44 /* number of bytes to jump */ 272#define OFFSET1 47 /* number of bytes to jump */
273 EMIT2(X86_JBE, OFFSET1); /* jbe out */ 273 EMIT2(X86_JBE, OFFSET1); /* jbe out */
274 label1 = cnt; 274 label1 = cnt;
275 275
@@ -278,15 +278,15 @@ static void emit_bpf_tail_call(u8 **pprog)
278 */ 278 */
279 EMIT2_off32(0x8B, 0x85, -STACKSIZE + 36); /* mov eax, dword ptr [rbp - 516] */ 279 EMIT2_off32(0x8B, 0x85, -STACKSIZE + 36); /* mov eax, dword ptr [rbp - 516] */
280 EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */ 280 EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */
281#define OFFSET2 33 281#define OFFSET2 36
282 EMIT2(X86_JA, OFFSET2); /* ja out */ 282 EMIT2(X86_JA, OFFSET2); /* ja out */
283 label2 = cnt; 283 label2 = cnt;
284 EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */ 284 EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */
285 EMIT2_off32(0x89, 0x85, -STACKSIZE + 36); /* mov dword ptr [rbp - 516], eax */ 285 EMIT2_off32(0x89, 0x85, -STACKSIZE + 36); /* mov dword ptr [rbp - 516], eax */
286 286
287 /* prog = array->prog[index]; */ 287 /* prog = array->prog[index]; */
288 EMIT4(0x48, 0x8D, 0x44, 0xD6); /* lea rax, [rsi + rdx * 8 + 0x50] */ 288 EMIT4_off32(0x48, 0x8D, 0x84, 0xD6, /* lea rax, [rsi + rdx * 8 + offsetof(...)] */
289 EMIT1(offsetof(struct bpf_array, prog)); 289 offsetof(struct bpf_array, prog));
290 EMIT3(0x48, 0x8B, 0x00); /* mov rax, qword ptr [rax] */ 290 EMIT3(0x48, 0x8B, 0x00); /* mov rax, qword ptr [rax] */
291 291
292 /* if (prog == NULL) 292 /* if (prog == NULL)
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index cfba30f27392..e4308fe6afe8 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -972,6 +972,11 @@ u64 efi_mem_attributes(unsigned long phys_addr)
972 972
973static int __init arch_parse_efi_cmdline(char *str) 973static int __init arch_parse_efi_cmdline(char *str)
974{ 974{
975 if (!str) {
976 pr_warn("need at least one option\n");
977 return -EINVAL;
978 }
979
975 if (parse_option_str(str, "old_map")) 980 if (parse_option_str(str, "old_map"))
976 set_bit(EFI_OLD_MEMMAP, &efi.flags); 981 set_bit(EFI_OLD_MEMMAP, &efi.flags);
977 if (parse_option_str(str, "debug")) 982 if (parse_option_str(str, "debug"))
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index 0d7dd1f5ac36..9ab52791fed5 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -22,6 +22,7 @@
22#include <asm/fpu/internal.h> 22#include <asm/fpu/internal.h>
23#include <asm/debugreg.h> 23#include <asm/debugreg.h>
24#include <asm/cpu.h> 24#include <asm/cpu.h>
25#include <asm/mmu_context.h>
25 26
26#ifdef CONFIG_X86_32 27#ifdef CONFIG_X86_32
27__visible unsigned long saved_context_ebx; 28__visible unsigned long saved_context_ebx;
@@ -153,7 +154,7 @@ static void fix_processor_context(void)
153 syscall_init(); /* This sets MSR_*STAR and related */ 154 syscall_init(); /* This sets MSR_*STAR and related */
154#endif 155#endif
155 load_TR_desc(); /* This does ltr */ 156 load_TR_desc(); /* This does ltr */
156 load_LDT(&current->active_mm->context); /* This does lldt */ 157 load_mm_ldt(current->active_mm); /* This does lldt */
157 158
158 fpu__resume_cpu(); 159 fpu__resume_cpu();
159} 160}
diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
index e88fda867a33..484145368a24 100644
--- a/arch/x86/xen/Kconfig
+++ b/arch/x86/xen/Kconfig
@@ -8,7 +8,7 @@ config XEN
8 select PARAVIRT_CLOCK 8 select PARAVIRT_CLOCK
9 select XEN_HAVE_PVMMU 9 select XEN_HAVE_PVMMU
10 depends on X86_64 || (X86_32 && X86_PAE) 10 depends on X86_64 || (X86_32 && X86_PAE)
11 depends on X86_TSC 11 depends on X86_LOCAL_APIC && X86_TSC
12 help 12 help
13 This is the Linux Xen port. Enabling this will allow the 13 This is the Linux Xen port. Enabling this will allow the
14 kernel to boot in a paravirtualized environment under the 14 kernel to boot in a paravirtualized environment under the
@@ -17,7 +17,7 @@ config XEN
17config XEN_DOM0 17config XEN_DOM0
18 def_bool y 18 def_bool y
19 depends on XEN && PCI_XEN && SWIOTLB_XEN 19 depends on XEN && PCI_XEN && SWIOTLB_XEN
20 depends on X86_LOCAL_APIC && X86_IO_APIC && ACPI && PCI 20 depends on X86_IO_APIC && ACPI && PCI
21 21
22config XEN_PVHVM 22config XEN_PVHVM
23 def_bool y 23 def_bool y
diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile
index 7322755f337a..4b6e29ac0968 100644
--- a/arch/x86/xen/Makefile
+++ b/arch/x86/xen/Makefile
@@ -13,13 +13,13 @@ CFLAGS_mmu.o := $(nostackp)
13obj-y := enlighten.o setup.o multicalls.o mmu.o irq.o \ 13obj-y := enlighten.o setup.o multicalls.o mmu.o irq.o \
14 time.o xen-asm.o xen-asm_$(BITS).o \ 14 time.o xen-asm.o xen-asm_$(BITS).o \
15 grant-table.o suspend.o platform-pci-unplug.o \ 15 grant-table.o suspend.o platform-pci-unplug.o \
16 p2m.o 16 p2m.o apic.o
17 17
18obj-$(CONFIG_EVENT_TRACING) += trace.o 18obj-$(CONFIG_EVENT_TRACING) += trace.o
19 19
20obj-$(CONFIG_SMP) += smp.o 20obj-$(CONFIG_SMP) += smp.o
21obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o 21obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o
22obj-$(CONFIG_XEN_DEBUG_FS) += debugfs.o 22obj-$(CONFIG_XEN_DEBUG_FS) += debugfs.o
23obj-$(CONFIG_XEN_DOM0) += apic.o vga.o 23obj-$(CONFIG_XEN_DOM0) += vga.o
24obj-$(CONFIG_SWIOTLB_XEN) += pci-swiotlb-xen.o 24obj-$(CONFIG_SWIOTLB_XEN) += pci-swiotlb-xen.o
25obj-$(CONFIG_XEN_EFI) += efi.o 25obj-$(CONFIG_XEN_EFI) += efi.o
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 0b95c9b8283f..11d6fb4e8483 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -483,6 +483,7 @@ static void set_aliased_prot(void *v, pgprot_t prot)
483 pte_t pte; 483 pte_t pte;
484 unsigned long pfn; 484 unsigned long pfn;
485 struct page *page; 485 struct page *page;
486 unsigned char dummy;
486 487
487 ptep = lookup_address((unsigned long)v, &level); 488 ptep = lookup_address((unsigned long)v, &level);
488 BUG_ON(ptep == NULL); 489 BUG_ON(ptep == NULL);
@@ -492,6 +493,32 @@ static void set_aliased_prot(void *v, pgprot_t prot)
492 493
493 pte = pfn_pte(pfn, prot); 494 pte = pfn_pte(pfn, prot);
494 495
496 /*
497 * Careful: update_va_mapping() will fail if the virtual address
498 * we're poking isn't populated in the page tables. We don't
499 * need to worry about the direct map (that's always in the page
500 * tables), but we need to be careful about vmap space. In
501 * particular, the top level page table can lazily propagate
502 * entries between processes, so if we've switched mms since we
503 * vmapped the target in the first place, we might not have the
504 * top-level page table entry populated.
505 *
506 * We disable preemption because we want the same mm active when
507 * we probe the target and when we issue the hypercall. We'll
508 * have the same nominal mm, but if we're a kernel thread, lazy
509 * mm dropping could change our pgd.
510 *
511 * Out of an abundance of caution, this uses __get_user() to fault
512 * in the target address just in case there's some obscure case
513 * in which the target address isn't readable.
514 */
515
516 preempt_disable();
517
518 pagefault_disable(); /* Avoid warnings due to being atomic. */
519 __get_user(dummy, (unsigned char __user __force *)v);
520 pagefault_enable();
521
495 if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0)) 522 if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
496 BUG(); 523 BUG();
497 524
@@ -503,6 +530,8 @@ static void set_aliased_prot(void *v, pgprot_t prot)
503 BUG(); 530 BUG();
504 } else 531 } else
505 kmap_flush_unused(); 532 kmap_flush_unused();
533
534 preempt_enable();
506} 535}
507 536
508static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries) 537static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
@@ -510,6 +539,17 @@ static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
510 const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE; 539 const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
511 int i; 540 int i;
512 541
542 /*
543 * We need to mark the all aliases of the LDT pages RO. We
544 * don't need to call vm_flush_aliases(), though, since that's
545 * only responsible for flushing aliases out the TLBs, not the
546 * page tables, and Xen will flush the TLB for us if needed.
547 *
548 * To avoid confusing future readers: none of this is necessary
549 * to load the LDT. The hypervisor only checks this when the
550 * LDT is faulted in due to subsequent descriptor access.
551 */
552
513 for(i = 0; i < entries; i += entries_per_page) 553 for(i = 0; i < entries; i += entries_per_page)
514 set_aliased_prot(ldt + i, PAGE_KERNEL_RO); 554 set_aliased_prot(ldt + i, PAGE_KERNEL_RO);
515} 555}
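Illustrative note: the set_aliased_prot() change above probes the target byte with __get_user() under pagefault_disable() and with preemption off, so any lazily propagated vmalloc page-table entry is populated before the update_va_mapping hypercall. The userspace sketch below shows only the general "touch the page before the critical step" idea; the mmap'd buffer and the memset are stand-ins, not an emulation of the hypercall path.

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	const size_t len = 4096;
	unsigned char *target;
	volatile unsigned char dummy;

	target = mmap(NULL, len, PROT_READ | PROT_WRITE,
		      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (target == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* Probe: fault the page in now, so the "critical" operation below
	 * never sees an unpopulated mapping. */
	dummy = target[0];
	(void)dummy;

	memset(target, 0xaa, len);	/* stand-in for the hypercall */
	printf("first byte after critical step: 0x%02x\n", target[0]);

	munmap(target, len);
	return 0;
}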
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index c20fe29e65f4..2292721b1d10 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -101,17 +101,15 @@ struct dom0_vga_console_info;
101 101
102#ifdef CONFIG_XEN_DOM0 102#ifdef CONFIG_XEN_DOM0
103void __init xen_init_vga(const struct dom0_vga_console_info *, size_t size); 103void __init xen_init_vga(const struct dom0_vga_console_info *, size_t size);
104void __init xen_init_apic(void);
105#else 104#else
106static inline void __init xen_init_vga(const struct dom0_vga_console_info *info, 105static inline void __init xen_init_vga(const struct dom0_vga_console_info *info,
107 size_t size) 106 size_t size)
108{ 107{
109} 108}
110static inline void __init xen_init_apic(void)
111{
112}
113#endif 109#endif
114 110
111void __init xen_init_apic(void);
112
115#ifdef CONFIG_XEN_EFI 113#ifdef CONFIG_XEN_EFI
116extern void xen_efi_init(void); 114extern void xen_efi_init(void);
117#else 115#else
diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild
index 14d15bf1a95b..5b478accd5fc 100644
--- a/arch/xtensa/include/asm/Kbuild
+++ b/arch/xtensa/include/asm/Kbuild
@@ -19,6 +19,7 @@ generic-y += linkage.h
19generic-y += local.h 19generic-y += local.h
20generic-y += local64.h 20generic-y += local64.h
21generic-y += mcs_spinlock.h 21generic-y += mcs_spinlock.h
22generic-y += mm-arch-hooks.h
22generic-y += percpu.h 23generic-y += percpu.h
23generic-y += preempt.h 24generic-y += preempt.h
24generic-y += resource.h 25generic-y += resource.h
diff --git a/arch/xtensa/include/asm/mm-arch-hooks.h b/arch/xtensa/include/asm/mm-arch-hooks.h
deleted file mode 100644
index d2e5cfd3dd02..000000000000
--- a/arch/xtensa/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * Architecture specific mm hooks
3 *
4 * Copyright (C) 2015, IBM Corporation
5 * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _ASM_XTENSA_MM_ARCH_HOOKS_H
13#define _ASM_XTENSA_MM_ARCH_HOOKS_H
14
15#endif /* _ASM_XTENSA_MM_ARCH_HOOKS_H */
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 0436c21db7f2..719b7152aed1 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -51,7 +51,7 @@ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
51 unsigned long idx = BIO_POOL_NONE; 51 unsigned long idx = BIO_POOL_NONE;
52 unsigned inline_vecs; 52 unsigned inline_vecs;
53 53
54 if (!bs) { 54 if (!bs || !bs->bio_integrity_pool) {
55 bip = kmalloc(sizeof(struct bio_integrity_payload) + 55 bip = kmalloc(sizeof(struct bio_integrity_payload) +
56 sizeof(struct bio_vec) * nr_vecs, gfp_mask); 56 sizeof(struct bio_vec) * nr_vecs, gfp_mask);
57 inline_vecs = nr_vecs; 57 inline_vecs = nr_vecs;
@@ -104,7 +104,7 @@ void bio_integrity_free(struct bio *bio)
104 kfree(page_address(bip->bip_vec->bv_page) + 104 kfree(page_address(bip->bip_vec->bv_page) +
105 bip->bip_vec->bv_offset); 105 bip->bip_vec->bv_offset);
106 106
107 if (bs) { 107 if (bs && bs->bio_integrity_pool) {
108 if (bip->bip_slab != BIO_POOL_NONE) 108 if (bip->bip_slab != BIO_POOL_NONE)
109 bvec_free(bs->bvec_integrity_pool, bip->bip_vec, 109 bvec_free(bs->bvec_integrity_pool, bip->bip_vec,
110 bip->bip_slab); 110 bip->bip_slab);
diff --git a/block/bio.c b/block/bio.c
index 2a00d349cd68..d6e5ba3399f0 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1831,8 +1831,9 @@ EXPORT_SYMBOL(bio_endio);
1831 * Allocates and returns a new bio which represents @sectors from the start of 1831 * Allocates and returns a new bio which represents @sectors from the start of
1832 * @bio, and updates @bio to represent the remaining sectors. 1832 * @bio, and updates @bio to represent the remaining sectors.
1833 * 1833 *
1834 * The newly allocated bio will point to @bio's bi_io_vec; it is the caller's 1834 * Unless this is a discard request the newly allocated bio will point
1835 * responsibility to ensure that @bio is not freed before the split. 1835 * to @bio's bi_io_vec; it is the caller's responsibility to ensure that
1836 * @bio is not freed before the split.
1836 */ 1837 */
1837struct bio *bio_split(struct bio *bio, int sectors, 1838struct bio *bio_split(struct bio *bio, int sectors,
1838 gfp_t gfp, struct bio_set *bs) 1839 gfp_t gfp, struct bio_set *bs)
@@ -1842,7 +1843,15 @@ struct bio *bio_split(struct bio *bio, int sectors,
1842 BUG_ON(sectors <= 0); 1843 BUG_ON(sectors <= 0);
1843 BUG_ON(sectors >= bio_sectors(bio)); 1844 BUG_ON(sectors >= bio_sectors(bio));
1844 1845
1845 split = bio_clone_fast(bio, gfp, bs); 1846 /*
1847 * Discards need a mutable bio_vec to accommodate the payload
1848 * required by the DSM TRIM and UNMAP commands.
1849 */
1850 if (bio->bi_rw & REQ_DISCARD)
1851 split = bio_clone_bioset(bio, gfp, bs);
1852 else
1853 split = bio_clone_fast(bio, gfp, bs);
1854
1846 if (!split) 1855 if (!split)
1847 return NULL; 1856 return NULL;
1848 1857
@@ -2009,6 +2018,7 @@ int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css)
2009 bio->bi_css = blkcg_css; 2018 bio->bi_css = blkcg_css;
2010 return 0; 2019 return 0;
2011} 2020}
2021EXPORT_SYMBOL_GPL(bio_associate_blkcg);
2012 2022
2013/** 2023/**
2014 * bio_associate_current - associate a bio with %current 2024 * bio_associate_current - associate a bio with %current
@@ -2039,6 +2049,7 @@ int bio_associate_current(struct bio *bio)
2039 bio->bi_css = task_get_css(current, blkio_cgrp_id); 2049 bio->bi_css = task_get_css(current, blkio_cgrp_id);
2040 return 0; 2050 return 0;
2041} 2051}
2052EXPORT_SYMBOL_GPL(bio_associate_current);
2042 2053
2043/** 2054/**
2044 * bio_disassociate_task - undo bio_associate_current() 2055 * bio_disassociate_task - undo bio_associate_current()
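Illustrative note: the bio_split() hunk above picks a deep clone (bio_clone_bioset(), private bio_vec) for REQ_DISCARD bios because the TRIM/UNMAP payload is rewritten later, and keeps the cheap shallow clone (bio_clone_fast(), shared bio_vec) for everything else. The toy model below shows only that choice; all types and names are invented for the sketch.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define REQ_DISCARD 0x1u

struct toy_bio {
	unsigned int	rw;
	size_t		nvecs;
	int		*vec;		/* shared with parent unless owns_vec */
	int		owns_vec;
};

static struct toy_bio *clone_shallow(const struct toy_bio *src)
{
	struct toy_bio *b = malloc(sizeof(*b));

	if (!b)
		return NULL;
	*b = *src;
	b->owns_vec = 0;		/* keeps pointing at src->vec */
	return b;
}

static struct toy_bio *clone_deep(const struct toy_bio *src)
{
	struct toy_bio *b = clone_shallow(src);

	if (!b)
		return NULL;
	b->vec = malloc(src->nvecs * sizeof(*b->vec));
	if (!b->vec) {
		free(b);
		return NULL;
	}
	memcpy(b->vec, src->vec, src->nvecs * sizeof(*b->vec));
	b->owns_vec = 1;		/* private copy, safe to rewrite */
	return b;
}

static struct toy_bio *toy_split(const struct toy_bio *src)
{
	return (src->rw & REQ_DISCARD) ? clone_deep(src) : clone_shallow(src);
}

int main(void)
{
	int vec[2] = { 1, 2 };
	struct toy_bio read_bio    = { .rw = 0,           .nvecs = 2, .vec = vec };
	struct toy_bio discard_bio = { .rw = REQ_DISCARD, .nvecs = 2, .vec = vec };
	struct toy_bio *a = toy_split(&read_bio);
	struct toy_bio *b = toy_split(&discard_bio);

	if (!a || !b)
		return 1;
	printf("read split shares parent vec: %d\n", a->vec == vec);
	printf("discard split owns its vec:   %d\n", b->owns_vec);

	free(a);
	free(b->vec);
	free(b);
	return 0;
}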
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 9f97da52d006..d6283b3f5db5 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -29,6 +29,14 @@
29 29
30#define MAX_KEY_LEN 100 30#define MAX_KEY_LEN 100
31 31
32/*
33 * blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation.
34 * blkcg_pol_register_mutex nests outside of it and synchronizes entire
35 * policy [un]register operations including cgroup file additions /
36 * removals. Putting cgroup file registration outside blkcg_pol_mutex
37 * allows grabbing it from cgroup callbacks.
38 */
39static DEFINE_MUTEX(blkcg_pol_register_mutex);
32static DEFINE_MUTEX(blkcg_pol_mutex); 40static DEFINE_MUTEX(blkcg_pol_mutex);
33 41
34struct blkcg blkcg_root; 42struct blkcg blkcg_root;
@@ -38,6 +46,8 @@ struct cgroup_subsys_state * const blkcg_root_css = &blkcg_root.css;
38 46
39static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS]; 47static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];
40 48
49static LIST_HEAD(all_blkcgs); /* protected by blkcg_pol_mutex */
50
41static bool blkcg_policy_enabled(struct request_queue *q, 51static bool blkcg_policy_enabled(struct request_queue *q,
42 const struct blkcg_policy *pol) 52 const struct blkcg_policy *pol)
43{ 53{
@@ -453,20 +463,7 @@ static int blkcg_reset_stats(struct cgroup_subsys_state *css,
453 struct blkcg_gq *blkg; 463 struct blkcg_gq *blkg;
454 int i; 464 int i;
455 465
456 /* 466 mutex_lock(&blkcg_pol_mutex);
457 * XXX: We invoke cgroup_add/rm_cftypes() under blkcg_pol_mutex
458 * which ends up putting cgroup's internal cgroup_tree_mutex under
459 * it; however, cgroup_tree_mutex is nested above cgroup file
460 * active protection and grabbing blkcg_pol_mutex from a cgroup
461 * file operation creates a possible circular dependency. cgroup
462 * internal locking is planned to go through further simplification
463 * and this issue should go away soon. For now, let's trylock
464 * blkcg_pol_mutex and restart the write on failure.
465 *
466 * http://lkml.kernel.org/g/5363C04B.4010400@oracle.com
467 */
468 if (!mutex_trylock(&blkcg_pol_mutex))
469 return restart_syscall();
470 spin_lock_irq(&blkcg->lock); 467 spin_lock_irq(&blkcg->lock);
471 468
472 /* 469 /*
@@ -721,8 +718,12 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
721 return -EINVAL; 718 return -EINVAL;
722 719
723 disk = get_gendisk(MKDEV(major, minor), &part); 720 disk = get_gendisk(MKDEV(major, minor), &part);
724 if (!disk || part) 721 if (!disk)
722 return -EINVAL;
723 if (part) {
724 put_disk(disk);
725 return -EINVAL; 725 return -EINVAL;
726 }
726 727
727 rcu_read_lock(); 728 rcu_read_lock();
728 spin_lock_irq(disk->queue->queue_lock); 729 spin_lock_irq(disk->queue->queue_lock);
@@ -822,8 +823,17 @@ static void blkcg_css_free(struct cgroup_subsys_state *css)
822{ 823{
823 struct blkcg *blkcg = css_to_blkcg(css); 824 struct blkcg *blkcg = css_to_blkcg(css);
824 825
825 if (blkcg != &blkcg_root) 826 mutex_lock(&blkcg_pol_mutex);
827 list_del(&blkcg->all_blkcgs_node);
828 mutex_unlock(&blkcg_pol_mutex);
829
830 if (blkcg != &blkcg_root) {
831 int i;
832
833 for (i = 0; i < BLKCG_MAX_POLS; i++)
834 kfree(blkcg->pd[i]);
826 kfree(blkcg); 835 kfree(blkcg);
836 }
827} 837}
828 838
829static struct cgroup_subsys_state * 839static struct cgroup_subsys_state *
@@ -833,6 +843,8 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
833 struct cgroup_subsys_state *ret; 843 struct cgroup_subsys_state *ret;
834 int i; 844 int i;
835 845
846 mutex_lock(&blkcg_pol_mutex);
847
836 if (!parent_css) { 848 if (!parent_css) {
837 blkcg = &blkcg_root; 849 blkcg = &blkcg_root;
838 goto done; 850 goto done;
@@ -875,14 +887,17 @@ done:
875#ifdef CONFIG_CGROUP_WRITEBACK 887#ifdef CONFIG_CGROUP_WRITEBACK
876 INIT_LIST_HEAD(&blkcg->cgwb_list); 888 INIT_LIST_HEAD(&blkcg->cgwb_list);
877#endif 889#endif
890 list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs);
891
892 mutex_unlock(&blkcg_pol_mutex);
878 return &blkcg->css; 893 return &blkcg->css;
879 894
880free_pd_blkcg: 895free_pd_blkcg:
881 for (i--; i >= 0; i--) 896 for (i--; i >= 0; i--)
882 kfree(blkcg->pd[i]); 897 kfree(blkcg->pd[i]);
883
884free_blkcg: 898free_blkcg:
885 kfree(blkcg); 899 kfree(blkcg);
900 mutex_unlock(&blkcg_pol_mutex);
886 return ret; 901 return ret;
887} 902}
888 903
@@ -1037,10 +1052,8 @@ int blkcg_activate_policy(struct request_queue *q,
1037 const struct blkcg_policy *pol) 1052 const struct blkcg_policy *pol)
1038{ 1053{
1039 LIST_HEAD(pds); 1054 LIST_HEAD(pds);
1040 LIST_HEAD(cpds);
1041 struct blkcg_gq *blkg; 1055 struct blkcg_gq *blkg;
1042 struct blkg_policy_data *pd, *nd; 1056 struct blkg_policy_data *pd, *nd;
1043 struct blkcg_policy_data *cpd, *cnd;
1044 int cnt = 0, ret; 1057 int cnt = 0, ret;
1045 1058
1046 if (blkcg_policy_enabled(q, pol)) 1059 if (blkcg_policy_enabled(q, pol))
@@ -1053,10 +1066,7 @@ int blkcg_activate_policy(struct request_queue *q,
1053 cnt++; 1066 cnt++;
1054 spin_unlock_irq(q->queue_lock); 1067 spin_unlock_irq(q->queue_lock);
1055 1068
1056 /* 1069 /* allocate per-blkg policy data for all existing blkgs */
1057 * Allocate per-blkg and per-blkcg policy data
1058 * for all existing blkgs.
1059 */
1060 while (cnt--) { 1070 while (cnt--) {
1061 pd = kzalloc_node(pol->pd_size, GFP_KERNEL, q->node); 1071 pd = kzalloc_node(pol->pd_size, GFP_KERNEL, q->node);
1062 if (!pd) { 1072 if (!pd) {
@@ -1064,15 +1074,6 @@ int blkcg_activate_policy(struct request_queue *q,
1064 goto out_free; 1074 goto out_free;
1065 } 1075 }
1066 list_add_tail(&pd->alloc_node, &pds); 1076 list_add_tail(&pd->alloc_node, &pds);
1067
1068 if (!pol->cpd_size)
1069 continue;
1070 cpd = kzalloc_node(pol->cpd_size, GFP_KERNEL, q->node);
1071 if (!cpd) {
1072 ret = -ENOMEM;
1073 goto out_free;
1074 }
1075 list_add_tail(&cpd->alloc_node, &cpds);
1076 } 1077 }
1077 1078
1078 /* 1079 /*
@@ -1082,32 +1083,17 @@ int blkcg_activate_policy(struct request_queue *q,
1082 spin_lock_irq(q->queue_lock); 1083 spin_lock_irq(q->queue_lock);
1083 1084
1084 list_for_each_entry(blkg, &q->blkg_list, q_node) { 1085 list_for_each_entry(blkg, &q->blkg_list, q_node) {
1085 if (WARN_ON(list_empty(&pds)) || 1086 if (WARN_ON(list_empty(&pds))) {
1086 WARN_ON(pol->cpd_size && list_empty(&cpds))) {
1087 /* umm... this shouldn't happen, just abort */ 1087 /* umm... this shouldn't happen, just abort */
1088 ret = -ENOMEM; 1088 ret = -ENOMEM;
1089 goto out_unlock; 1089 goto out_unlock;
1090 } 1090 }
1091 cpd = list_first_entry(&cpds, struct blkcg_policy_data,
1092 alloc_node);
1093 list_del_init(&cpd->alloc_node);
1094 pd = list_first_entry(&pds, struct blkg_policy_data, alloc_node); 1091 pd = list_first_entry(&pds, struct blkg_policy_data, alloc_node);
1095 list_del_init(&pd->alloc_node); 1092 list_del_init(&pd->alloc_node);
1096 1093
1097 /* grab blkcg lock too while installing @pd on @blkg */ 1094 /* grab blkcg lock too while installing @pd on @blkg */
1098 spin_lock(&blkg->blkcg->lock); 1095 spin_lock(&blkg->blkcg->lock);
1099 1096
1100 if (!pol->cpd_size)
1101 goto no_cpd;
1102 if (!blkg->blkcg->pd[pol->plid]) {
1103 /* Per-policy per-blkcg data */
1104 blkg->blkcg->pd[pol->plid] = cpd;
1105 cpd->plid = pol->plid;
1106 pol->cpd_init_fn(blkg->blkcg);
1107 } else { /* must free it as it has already been extracted */
1108 kfree(cpd);
1109 }
1110no_cpd:
1111 blkg->pd[pol->plid] = pd; 1097 blkg->pd[pol->plid] = pd;
1112 pd->blkg = blkg; 1098 pd->blkg = blkg;
1113 pd->plid = pol->plid; 1099 pd->plid = pol->plid;
@@ -1124,8 +1110,6 @@ out_free:
1124 blk_queue_bypass_end(q); 1110 blk_queue_bypass_end(q);
1125 list_for_each_entry_safe(pd, nd, &pds, alloc_node) 1111 list_for_each_entry_safe(pd, nd, &pds, alloc_node)
1126 kfree(pd); 1112 kfree(pd);
1127 list_for_each_entry_safe(cpd, cnd, &cpds, alloc_node)
1128 kfree(cpd);
1129 return ret; 1113 return ret;
1130} 1114}
1131EXPORT_SYMBOL_GPL(blkcg_activate_policy); 1115EXPORT_SYMBOL_GPL(blkcg_activate_policy);
@@ -1162,8 +1146,6 @@ void blkcg_deactivate_policy(struct request_queue *q,
1162 1146
1163 kfree(blkg->pd[pol->plid]); 1147 kfree(blkg->pd[pol->plid]);
1164 blkg->pd[pol->plid] = NULL; 1148 blkg->pd[pol->plid] = NULL;
1165 kfree(blkg->blkcg->pd[pol->plid]);
1166 blkg->blkcg->pd[pol->plid] = NULL;
1167 1149
1168 spin_unlock(&blkg->blkcg->lock); 1150 spin_unlock(&blkg->blkcg->lock);
1169 } 1151 }
@@ -1182,11 +1164,13 @@ EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
1182 */ 1164 */
1183int blkcg_policy_register(struct blkcg_policy *pol) 1165int blkcg_policy_register(struct blkcg_policy *pol)
1184{ 1166{
1167 struct blkcg *blkcg;
1185 int i, ret; 1168 int i, ret;
1186 1169
1187 if (WARN_ON(pol->pd_size < sizeof(struct blkg_policy_data))) 1170 if (WARN_ON(pol->pd_size < sizeof(struct blkg_policy_data)))
1188 return -EINVAL; 1171 return -EINVAL;
1189 1172
1173 mutex_lock(&blkcg_pol_register_mutex);
1190 mutex_lock(&blkcg_pol_mutex); 1174 mutex_lock(&blkcg_pol_mutex);
1191 1175
1192 /* find an empty slot */ 1176 /* find an empty slot */
@@ -1195,19 +1179,49 @@ int blkcg_policy_register(struct blkcg_policy *pol)
1195 if (!blkcg_policy[i]) 1179 if (!blkcg_policy[i])
1196 break; 1180 break;
1197 if (i >= BLKCG_MAX_POLS) 1181 if (i >= BLKCG_MAX_POLS)
1198 goto out_unlock; 1182 goto err_unlock;
1199 1183
1200 /* register and update blkgs */ 1184 /* register @pol */
1201 pol->plid = i; 1185 pol->plid = i;
1202 blkcg_policy[i] = pol; 1186 blkcg_policy[pol->plid] = pol;
1187
1188 /* allocate and install cpd's */
1189 if (pol->cpd_size) {
1190 list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
1191 struct blkcg_policy_data *cpd;
1192
1193 cpd = kzalloc(pol->cpd_size, GFP_KERNEL);
1194 if (!cpd) {
1195 mutex_unlock(&blkcg_pol_mutex);
1196 goto err_free_cpds;
1197 }
1198
1199 blkcg->pd[pol->plid] = cpd;
1200 cpd->plid = pol->plid;
1201 pol->cpd_init_fn(blkcg);
1202 }
1203 }
1204
1205 mutex_unlock(&blkcg_pol_mutex);
1203 1206
1204 /* everything is in place, add intf files for the new policy */ 1207 /* everything is in place, add intf files for the new policy */
1205 if (pol->cftypes) 1208 if (pol->cftypes)
1206 WARN_ON(cgroup_add_legacy_cftypes(&blkio_cgrp_subsys, 1209 WARN_ON(cgroup_add_legacy_cftypes(&blkio_cgrp_subsys,
1207 pol->cftypes)); 1210 pol->cftypes));
1208 ret = 0; 1211 mutex_unlock(&blkcg_pol_register_mutex);
1209out_unlock: 1212 return 0;
1213
1214err_free_cpds:
1215 if (pol->cpd_size) {
1216 list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
1217 kfree(blkcg->pd[pol->plid]);
1218 blkcg->pd[pol->plid] = NULL;
1219 }
1220 }
1221 blkcg_policy[pol->plid] = NULL;
1222err_unlock:
1210 mutex_unlock(&blkcg_pol_mutex); 1223 mutex_unlock(&blkcg_pol_mutex);
1224 mutex_unlock(&blkcg_pol_register_mutex);
1211 return ret; 1225 return ret;
1212} 1226}
1213EXPORT_SYMBOL_GPL(blkcg_policy_register); 1227EXPORT_SYMBOL_GPL(blkcg_policy_register);
@@ -1220,7 +1234,9 @@ EXPORT_SYMBOL_GPL(blkcg_policy_register);
1220 */ 1234 */
1221void blkcg_policy_unregister(struct blkcg_policy *pol) 1235void blkcg_policy_unregister(struct blkcg_policy *pol)
1222{ 1236{
1223 mutex_lock(&blkcg_pol_mutex); 1237 struct blkcg *blkcg;
1238
1239 mutex_lock(&blkcg_pol_register_mutex);
1224 1240
1225 if (WARN_ON(blkcg_policy[pol->plid] != pol)) 1241 if (WARN_ON(blkcg_policy[pol->plid] != pol))
1226 goto out_unlock; 1242 goto out_unlock;
@@ -1229,9 +1245,19 @@ void blkcg_policy_unregister(struct blkcg_policy *pol)
1229 if (pol->cftypes) 1245 if (pol->cftypes)
1230 cgroup_rm_cftypes(pol->cftypes); 1246 cgroup_rm_cftypes(pol->cftypes);
1231 1247
1232 /* unregister and update blkgs */ 1248 /* remove cpds and unregister */
1249 mutex_lock(&blkcg_pol_mutex);
1250
1251 if (pol->cpd_size) {
1252 list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
1253 kfree(blkcg->pd[pol->plid]);
1254 blkcg->pd[pol->plid] = NULL;
1255 }
1256 }
1233 blkcg_policy[pol->plid] = NULL; 1257 blkcg_policy[pol->plid] = NULL;
1234out_unlock: 1258
1235 mutex_unlock(&blkcg_pol_mutex); 1259 mutex_unlock(&blkcg_pol_mutex);
1260out_unlock:
1261 mutex_unlock(&blkcg_pol_register_mutex);
1236} 1262}
1237EXPORT_SYMBOL_GPL(blkcg_policy_unregister); 1263EXPORT_SYMBOL_GPL(blkcg_policy_unregister);
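
The blk-cgroup hunks above move per-blkcg policy data (cpd) allocation out of blkcg_activate_policy() and into blkcg_policy_register(): every blkcg on the new all_blkcgs list gets a cpd under blkcg_pol_mutex, the err_free_cpds path unwinds all of them if any allocation fails, and blkcg_pol_register_mutex orders registration against cftype setup. What follows is only a minimal userspace sketch of that allocate-for-every-group-or-roll-back shape; struct group, struct policy_data and register_policy() are illustrative names, not kernel symbols.

    /*
     * Walk all existing groups, attach per-policy data to each, and undo
     * every attachment if any allocation fails.
     */
    #include <stdio.h>
    #include <stdlib.h>

    struct policy_data { int plid; };

    struct group {
        struct policy_data *pd;
        struct group *next;
    };

    /* Attach policy data to every group in the list, or roll back. */
    static int register_policy(struct group *head, int plid)
    {
        struct group *g;

        for (g = head; g; g = g->next) {
            g->pd = calloc(1, sizeof(*g->pd));
            if (!g->pd)
                goto rollback;
            g->pd->plid = plid;
        }
        return 0;

    rollback:
        /* Free whatever was attached before the failure (free(NULL) is a no-op). */
        for (g = head; g; g = g->next) {
            free(g->pd);
            g->pd = NULL;
        }
        return -1;
    }

    int main(void)
    {
        struct group c = { NULL, NULL }, b = { NULL, &c }, a = { NULL, &b };

        if (register_policy(&a, 0))
            fprintf(stderr, "registration rolled back\n");
        else
            printf("policy data attached to all groups\n");

        free(a.pd); free(b.pd); free(c.pd);
        return 0;
    }
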
diff --git a/block/blk-core.c b/block/blk-core.c
index 82819e68f58b..627ed0c593fb 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -3370,7 +3370,7 @@ EXPORT_SYMBOL(blk_post_runtime_resume);
3370int __init blk_dev_init(void) 3370int __init blk_dev_init(void)
3371{ 3371{
3372 BUILD_BUG_ON(__REQ_NR_BITS > 8 * 3372 BUILD_BUG_ON(__REQ_NR_BITS > 8 *
3373 sizeof(((struct request *)0)->cmd_flags)); 3373 FIELD_SIZEOF(struct request, cmd_flags));
3374 3374
3375 /* used for unplugging and affects IO latency/throughput - HIGHPRI */ 3375 /* used for unplugging and affects IO latency/throughput - HIGHPRI */
3376 kblockd_workqueue = alloc_workqueue("kblockd", 3376 kblockd_workqueue = alloc_workqueue("kblockd",
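
The blk-core hunk above swaps a hand-rolled sizeof on a casted null pointer for FIELD_SIZEOF(). A small standalone sketch of the idiom follows; the macro body mirrors the kernel's definition from memory, and struct request_like is a stand-in for struct request.

    /* Measure a struct member's size without needing an instance. */
    #include <stdio.h>

    #define FIELD_SIZEOF(t, f) (sizeof(((t *)0)->f))

    struct request_like {
        unsigned long long cmd_flags;
    };

    int main(void)
    {
        printf("cmd_flags is %zu bytes\n",
               FIELD_SIZEOF(struct request_like, cmd_flags));
        return 0;
    }
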
diff --git a/block/blk-mq.c b/block/blk-mq.c
index f53779692c77..7d842db59699 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1998,7 +1998,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
1998 goto err_hctxs; 1998 goto err_hctxs;
1999 1999
2000 setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q); 2000 setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
2001 blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30000); 2001 blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
2002 2002
2003 q->nr_queues = nr_cpu_ids; 2003 q->nr_queues = nr_cpu_ids;
2004 q->nr_hw_queues = set->nr_hw_queues; 2004 q->nr_hw_queues = set->nr_hw_queues;
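
The blk-mq hunk above replaces the literal 30000 with 30 * HZ because blk_queue_rq_timeout() expects jiffies, and a raw millisecond count is only a 30-second timeout when HZ happens to be 1000. A short sketch of the arithmetic; the HZ values are examples.

    #include <stdio.h>

    /* Seconds to jiffies: one jiffy is 1/HZ of a second. */
    static unsigned long seconds_to_jiffies(unsigned int secs, unsigned int hz)
    {
        return (unsigned long)secs * hz;
    }

    int main(void)
    {
        unsigned int hz_values[] = { 100, 250, 1000 };
        unsigned int i;

        for (i = 0; i < 3; i++)
            printf("HZ=%u: 30 s = %lu jiffies (literal 30000 would be %.0f s)\n",
                   hz_values[i], seconds_to_jiffies(30, hz_values[i]),
                   30000.0 / hz_values[i]);
        return 0;
    }
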
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 12600bfffca9..e0057d035200 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -241,8 +241,8 @@ EXPORT_SYMBOL(blk_queue_bounce_limit);
241 * Description: 241 * Description:
242 * Enables a low level driver to set a hard upper limit, 242 * Enables a low level driver to set a hard upper limit,
243 * max_hw_sectors, on the size of requests. max_hw_sectors is set by 243 * max_hw_sectors, on the size of requests. max_hw_sectors is set by
244 * the device driver based upon the combined capabilities of I/O 244 * the device driver based upon the capabilities of the I/O
245 * controller and storage device. 245 * controller.
246 * 246 *
247 * max_sectors is a soft limit imposed by the block layer for 247 * max_sectors is a soft limit imposed by the block layer for
248 * filesystem type requests. This value can be overridden on a 248 * filesystem type requests. This value can be overridden on a
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
index a3da6770bc9e..b8efe36ce114 100644
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -393,8 +393,6 @@ static int crypto_authenc_esn_genicv(struct aead_request *req, u8 *iv,
393 struct scatterlist *cipher = areq_ctx->cipher; 393 struct scatterlist *cipher = areq_ctx->cipher;
394 struct scatterlist *hsg = areq_ctx->hsg; 394 struct scatterlist *hsg = areq_ctx->hsg;
395 struct scatterlist *tsg = areq_ctx->tsg; 395 struct scatterlist *tsg = areq_ctx->tsg;
396 struct scatterlist *assoc1;
397 struct scatterlist *assoc2;
398 unsigned int ivsize = crypto_aead_ivsize(authenc_esn); 396 unsigned int ivsize = crypto_aead_ivsize(authenc_esn);
399 unsigned int cryptlen = req->cryptlen; 397 unsigned int cryptlen = req->cryptlen;
400 struct page *dstp; 398 struct page *dstp;
@@ -412,27 +410,19 @@ static int crypto_authenc_esn_genicv(struct aead_request *req, u8 *iv,
412 cryptlen += ivsize; 410 cryptlen += ivsize;
413 } 411 }
414 412
415 if (sg_is_last(assoc)) 413 if (assoc->length < 12)
416 return -EINVAL;
417
418 assoc1 = assoc + 1;
419 if (sg_is_last(assoc1))
420 return -EINVAL;
421
422 assoc2 = assoc + 2;
423 if (!sg_is_last(assoc2))
424 return -EINVAL; 414 return -EINVAL;
425 415
426 sg_init_table(hsg, 2); 416 sg_init_table(hsg, 2);
427 sg_set_page(hsg, sg_page(assoc), assoc->length, assoc->offset); 417 sg_set_page(hsg, sg_page(assoc), 4, assoc->offset);
428 sg_set_page(hsg + 1, sg_page(assoc2), assoc2->length, assoc2->offset); 418 sg_set_page(hsg + 1, sg_page(assoc), 4, assoc->offset + 8);
429 419
430 sg_init_table(tsg, 1); 420 sg_init_table(tsg, 1);
431 sg_set_page(tsg, sg_page(assoc1), assoc1->length, assoc1->offset); 421 sg_set_page(tsg, sg_page(assoc), 4, assoc->offset + 4);
432 422
433 areq_ctx->cryptlen = cryptlen; 423 areq_ctx->cryptlen = cryptlen;
434 areq_ctx->headlen = assoc->length + assoc2->length; 424 areq_ctx->headlen = 8;
435 areq_ctx->trailen = assoc1->length; 425 areq_ctx->trailen = 4;
436 areq_ctx->sg = dst; 426 areq_ctx->sg = dst;
437 427
438 areq_ctx->complete = authenc_esn_geniv_ahash_done; 428 areq_ctx->complete = authenc_esn_geniv_ahash_done;
@@ -563,8 +553,6 @@ static int crypto_authenc_esn_iverify(struct aead_request *req, u8 *iv,
563 struct scatterlist *cipher = areq_ctx->cipher; 553 struct scatterlist *cipher = areq_ctx->cipher;
564 struct scatterlist *hsg = areq_ctx->hsg; 554 struct scatterlist *hsg = areq_ctx->hsg;
565 struct scatterlist *tsg = areq_ctx->tsg; 555 struct scatterlist *tsg = areq_ctx->tsg;
566 struct scatterlist *assoc1;
567 struct scatterlist *assoc2;
568 unsigned int ivsize = crypto_aead_ivsize(authenc_esn); 556 unsigned int ivsize = crypto_aead_ivsize(authenc_esn);
569 struct page *srcp; 557 struct page *srcp;
570 u8 *vsrc; 558 u8 *vsrc;
@@ -580,27 +568,19 @@ static int crypto_authenc_esn_iverify(struct aead_request *req, u8 *iv,
580 cryptlen += ivsize; 568 cryptlen += ivsize;
581 } 569 }
582 570
583 if (sg_is_last(assoc)) 571 if (assoc->length < 12)
584 return -EINVAL;
585
586 assoc1 = assoc + 1;
587 if (sg_is_last(assoc1))
588 return -EINVAL;
589
590 assoc2 = assoc + 2;
591 if (!sg_is_last(assoc2))
592 return -EINVAL; 572 return -EINVAL;
593 573
594 sg_init_table(hsg, 2); 574 sg_init_table(hsg, 2);
595 sg_set_page(hsg, sg_page(assoc), assoc->length, assoc->offset); 575 sg_set_page(hsg, sg_page(assoc), 4, assoc->offset);
596 sg_set_page(hsg + 1, sg_page(assoc2), assoc2->length, assoc2->offset); 576 sg_set_page(hsg + 1, sg_page(assoc), 4, assoc->offset + 8);
597 577
598 sg_init_table(tsg, 1); 578 sg_init_table(tsg, 1);
599 sg_set_page(tsg, sg_page(assoc1), assoc1->length, assoc1->offset); 579 sg_set_page(tsg, sg_page(assoc), 4, assoc->offset + 4);
600 580
601 areq_ctx->cryptlen = cryptlen; 581 areq_ctx->cryptlen = cryptlen;
602 areq_ctx->headlen = assoc->length + assoc2->length; 582 areq_ctx->headlen = 8;
603 areq_ctx->trailen = assoc1->length; 583 areq_ctx->trailen = 4;
604 areq_ctx->sg = src; 584 areq_ctx->sg = src;
605 585
606 areq_ctx->complete = authenc_esn_verify_ahash_done; 586 areq_ctx->complete = authenc_esn_verify_ahash_done;
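
Both authencesn hunks above stop requiring the associated data as three separate scatterlist entries and instead take a single buffer of at least 12 bytes, sliced by offset: bytes 0-3 and 8-11 feed the hashed head (headlen = 8) and bytes 4-7 the trailer (trailen = 4). The sketch below only mirrors that slicing in userspace; the buffer contents and names are illustrative, and the driver is the reference for the actual ESN field meanings.

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        unsigned char assoc[12];
        unsigned char head[8], trail[4];
        size_t i;

        /* Fill the 12-byte associated-data buffer with recognizable bytes. */
        for (i = 0; i < sizeof(assoc); i++)
            assoc[i] = (unsigned char)i;

        if (sizeof(assoc) < 12)
            return 1;                   /* mirrors the new length check */

        memcpy(head, assoc, 4);         /* offset 0, length 4 */
        memcpy(head + 4, assoc + 8, 4); /* offset 8, length 4 */
        memcpy(trail, assoc + 4, 4);    /* offset 4, length 4 */

        printf("head  = %02x..%02x\n", head[0], head[7]);
        printf("trail = %02x..%02x\n", trail[0], trail[3]);
        return 0;
    }
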
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 717afcdb5f4a..88dbbb115285 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -231,7 +231,7 @@ int acpi_device_set_power(struct acpi_device *device, int state)
231 dev_warn(&device->dev, "Failed to change power state to %s\n", 231 dev_warn(&device->dev, "Failed to change power state to %s\n",
232 acpi_power_state_string(state)); 232 acpi_power_state_string(state));
233 } else { 233 } else {
234 device->power.state = state; 234 device->power.state = target_state;
235 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 235 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
236 "Device [%s] transitioned to %s\n", 236 "Device [%s] transitioned to %s\n",
237 device->pnp.bus_id, 237 device->pnp.bus_id,
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 8244f013f210..f1c966e05078 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -193,6 +193,7 @@ static bool acpi_decode_space(struct resource_win *win,
193 u8 iodec = attr->granularity == 0xfff ? ACPI_DECODE_10 : ACPI_DECODE_16; 193 u8 iodec = attr->granularity == 0xfff ? ACPI_DECODE_10 : ACPI_DECODE_16;
194 bool wp = addr->info.mem.write_protect; 194 bool wp = addr->info.mem.write_protect;
195 u64 len = attr->address_length; 195 u64 len = attr->address_length;
196 u64 start, end, offset = 0;
196 struct resource *res = &win->res; 197 struct resource *res = &win->res;
197 198
198 /* 199 /*
@@ -204,9 +205,6 @@ static bool acpi_decode_space(struct resource_win *win,
204 pr_debug("ACPI: Invalid address space min_addr_fix %d, max_addr_fix %d, len %llx\n", 205 pr_debug("ACPI: Invalid address space min_addr_fix %d, max_addr_fix %d, len %llx\n",
205 addr->min_address_fixed, addr->max_address_fixed, len); 206 addr->min_address_fixed, addr->max_address_fixed, len);
206 207
207 res->start = attr->minimum;
208 res->end = attr->maximum;
209
210 /* 208 /*
211 * For bridges that translate addresses across the bridge, 209 * For bridges that translate addresses across the bridge,
212 * translation_offset is the offset that must be added to the 210 * translation_offset is the offset that must be added to the
@@ -214,12 +212,22 @@ static bool acpi_decode_space(struct resource_win *win,
214 * primary side. Non-bridge devices must list 0 for all Address 212 * primary side. Non-bridge devices must list 0 for all Address
215 * Translation offset bits. 213 * Translation offset bits.
216 */ 214 */
217 if (addr->producer_consumer == ACPI_PRODUCER) { 215 if (addr->producer_consumer == ACPI_PRODUCER)
218 res->start += attr->translation_offset; 216 offset = attr->translation_offset;
219 res->end += attr->translation_offset; 217 else if (attr->translation_offset)
220 } else if (attr->translation_offset) {
221 pr_debug("ACPI: translation_offset(%lld) is invalid for non-bridge device.\n", 218 pr_debug("ACPI: translation_offset(%lld) is invalid for non-bridge device.\n",
222 attr->translation_offset); 219 attr->translation_offset);
220 start = attr->minimum + offset;
221 end = attr->maximum + offset;
222
223 win->offset = offset;
224 res->start = start;
225 res->end = end;
226 if (sizeof(resource_size_t) < sizeof(u64) &&
227 (offset != win->offset || start != res->start || end != res->end)) {
228 pr_warn("acpi resource window ([%#llx-%#llx] ignored, not CPU addressable)\n",
229 attr->minimum, attr->maximum);
230 return false;
223 } 231 }
224 232
225 switch (addr->resource_type) { 233 switch (addr->resource_type) {
@@ -236,8 +244,6 @@ static bool acpi_decode_space(struct resource_win *win,
236 return false; 244 return false;
237 } 245 }
238 246
239 win->offset = attr->translation_offset;
240
241 if (addr->producer_consumer == ACPI_PRODUCER) 247 if (addr->producer_consumer == ACPI_PRODUCER)
242 res->flags |= IORESOURCE_WINDOW; 248 res->flags |= IORESOURCE_WINDOW;
243 249
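
The acpi/resource.c hunks above compute the window in 64-bit arithmetic and then reject it when storing into resource_size_t loses bits, which can happen on 32-bit kernels. Below is a standalone sketch of that truncation check; fake_resource_size_t is deliberately 32-bit so the overflow shows up on any host, and store_window() is an illustrative name.

    #include <stdio.h>
    #include <stdint.h>

    typedef uint32_t fake_resource_size_t;  /* stand-in for a 32-bit resource_size_t */

    /* Returns 1 if the window fits, 0 if the caller should drop it. */
    static int store_window(uint64_t start64, uint64_t end64,
                            fake_resource_size_t *start, fake_resource_size_t *end)
    {
        *start = (fake_resource_size_t)start64;
        *end = (fake_resource_size_t)end64;

        if (sizeof(fake_resource_size_t) < sizeof(uint64_t) &&
            (*start != start64 || *end != end64)) {
            fprintf(stderr, "window [%#llx-%#llx] not CPU addressable, ignored\n",
                    (unsigned long long)start64, (unsigned long long)end64);
            return 0;
        }
        return 1;
    }

    int main(void)
    {
        fake_resource_size_t s, e;

        store_window(0x1000, 0x1fff, &s, &e);                   /* fits  */
        store_window(0x100000000ULL, 0x1ffffffffULL, &s, &e);   /* wraps */
        return 0;
    }
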
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 815f75ef2411..2922f1f252d5 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -32,6 +32,7 @@
32#include <linux/module.h> 32#include <linux/module.h>
33#include <linux/pci.h> 33#include <linux/pci.h>
34#include <linux/types.h> 34#include <linux/types.h>
35#include <linux/workqueue.h>
35#include <acpi/video.h> 36#include <acpi/video.h>
36 37
37ACPI_MODULE_NAME("video"); 38ACPI_MODULE_NAME("video");
@@ -41,6 +42,7 @@ void acpi_video_unregister_backlight(void);
41 42
42static bool backlight_notifier_registered; 43static bool backlight_notifier_registered;
43static struct notifier_block backlight_nb; 44static struct notifier_block backlight_nb;
45static struct work_struct backlight_notify_work;
44 46
45static enum acpi_backlight_type acpi_backlight_cmdline = acpi_backlight_undef; 47static enum acpi_backlight_type acpi_backlight_cmdline = acpi_backlight_undef;
46static enum acpi_backlight_type acpi_backlight_dmi = acpi_backlight_undef; 48static enum acpi_backlight_type acpi_backlight_dmi = acpi_backlight_undef;
@@ -262,6 +264,13 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
262 { }, 264 { },
263}; 265};
264 266
267/* This uses a workqueue to avoid various locking ordering issues */
268static void acpi_video_backlight_notify_work(struct work_struct *work)
269{
270 if (acpi_video_get_backlight_type() != acpi_backlight_video)
271 acpi_video_unregister_backlight();
272}
273
265static int acpi_video_backlight_notify(struct notifier_block *nb, 274static int acpi_video_backlight_notify(struct notifier_block *nb,
266 unsigned long val, void *bd) 275 unsigned long val, void *bd)
267{ 276{
@@ -269,9 +278,8 @@ static int acpi_video_backlight_notify(struct notifier_block *nb,
269 278
270 /* A raw bl registering may change video -> native */ 279 /* A raw bl registering may change video -> native */
271 if (backlight->props.type == BACKLIGHT_RAW && 280 if (backlight->props.type == BACKLIGHT_RAW &&
272 val == BACKLIGHT_REGISTERED && 281 val == BACKLIGHT_REGISTERED)
273 acpi_video_get_backlight_type() != acpi_backlight_video) 282 schedule_work(&backlight_notify_work);
274 acpi_video_unregister_backlight();
275 283
276 return NOTIFY_OK; 284 return NOTIFY_OK;
277} 285}
@@ -304,6 +312,8 @@ enum acpi_backlight_type acpi_video_get_backlight_type(void)
304 acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, 312 acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
305 ACPI_UINT32_MAX, find_video, NULL, 313 ACPI_UINT32_MAX, find_video, NULL,
306 &video_caps, NULL); 314 &video_caps, NULL);
315 INIT_WORK(&backlight_notify_work,
316 acpi_video_backlight_notify_work);
307 backlight_nb.notifier_call = acpi_video_backlight_notify; 317 backlight_nb.notifier_call = acpi_video_backlight_notify;
308 backlight_nb.priority = 0; 318 backlight_nb.priority = 0;
309 if (backlight_register_notifier(&backlight_nb) == 0) 319 if (backlight_register_notifier(&backlight_nb) == 0)
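
The video_detect hunks above stop calling acpi_video_unregister_backlight() straight from the backlight notifier and instead schedule a work item, so the heavier call runs in process context and the commented lock-ordering problem is avoided. Below is a minimal, self-contained kernel-module sketch of that defer-to-a-work-item pattern; it is illustrative only and not the video_detect code.

    #include <linux/module.h>
    #include <linux/workqueue.h>

    static struct work_struct demo_work;

    static void demo_work_fn(struct work_struct *work)
    {
        /* The heavier, lock-taking operation would run here. */
        pr_info("demo: deferred work running in process context\n");
    }

    /* A notifier (or similarly constrained) path only does this: */
    static void demo_event(void)
    {
        schedule_work(&demo_work);
    }

    static int __init demo_init(void)
    {
        INIT_WORK(&demo_work, demo_work_fn);
        demo_event();
        return 0;
    }

    static void __exit demo_exit(void)
    {
        cancel_work_sync(&demo_work);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");
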
diff --git a/drivers/ata/ahci_brcmstb.c b/drivers/ata/ahci_brcmstb.c
index ce1e3a885981..14b7305d2ba0 100644
--- a/drivers/ata/ahci_brcmstb.c
+++ b/drivers/ata/ahci_brcmstb.c
@@ -92,7 +92,7 @@ static inline u32 brcm_sata_readreg(void __iomem *addr)
92 * Other architectures (e.g., ARM) either do not support big endian, or 92 * Other architectures (e.g., ARM) either do not support big endian, or
93 * else leave I/O in little endian mode. 93 * else leave I/O in little endian mode.
94 */ 94 */
95 if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(__BIG_ENDIAN)) 95 if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
96 return __raw_readl(addr); 96 return __raw_readl(addr);
97 else 97 else
98 return readl_relaxed(addr); 98 return readl_relaxed(addr);
@@ -101,7 +101,7 @@ static inline u32 brcm_sata_readreg(void __iomem *addr)
101static inline void brcm_sata_writereg(u32 val, void __iomem *addr) 101static inline void brcm_sata_writereg(u32 val, void __iomem *addr)
102{ 102{
103 /* See brcm_sata_readreg() comments */ 103 /* See brcm_sata_readreg() comments */
104 if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(__BIG_ENDIAN)) 104 if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
105 __raw_writel(val, addr); 105 __raw_writel(val, addr);
106 else 106 else
107 writel_relaxed(val, addr); 107 writel_relaxed(val, addr);
@@ -209,6 +209,7 @@ static void brcm_sata_init(struct brcm_ahci_priv *priv)
209 priv->top_ctrl + SATA_TOP_CTRL_BUS_CTRL); 209 priv->top_ctrl + SATA_TOP_CTRL_BUS_CTRL);
210} 210}
211 211
212#ifdef CONFIG_PM_SLEEP
212static int brcm_ahci_suspend(struct device *dev) 213static int brcm_ahci_suspend(struct device *dev)
213{ 214{
214 struct ata_host *host = dev_get_drvdata(dev); 215 struct ata_host *host = dev_get_drvdata(dev);
@@ -231,6 +232,7 @@ static int brcm_ahci_resume(struct device *dev)
231 brcm_sata_phys_enable(priv); 232 brcm_sata_phys_enable(priv);
232 return ahci_platform_resume(dev); 233 return ahci_platform_resume(dev);
233} 234}
235#endif
234 236
235static struct scsi_host_template ahci_platform_sht = { 237static struct scsi_host_template ahci_platform_sht = {
236 AHCI_SHT(DRV_NAME), 238 AHCI_SHT(DRV_NAME),
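
The ahci_brcmstb hunks above fix a test that could never fire: IS_ENABLED() only reports true for Kconfig-style symbols the build defines to 1 (or as a module), while __BIG_ENDIAN, when defined at all, expands to 4321, so the check has to use CONFIG_CPU_BIG_ENDIAN. The userspace sketch below re-implements a simplified IS_ENABLED() from memory (IS_ENABLED_DEMO and the DEMO_* macros are not kernel symbols) to show the difference.

    #include <stdio.h>

    #define ARG_PLACEHOLDER_1 0,
    #define TAKE_SECOND_ARG(ignored, val, ...) val
    #define IS_DEFINED_TO_1_2(arg1_or_junk) TAKE_SECOND_ARG(arg1_or_junk 1, 0, 0)
    #define IS_DEFINED_TO_1_1(val) IS_DEFINED_TO_1_2(ARG_PLACEHOLDER_##val)
    #define IS_ENABLED_DEMO(option) IS_DEFINED_TO_1_1(option)

    #define DEMO_CONFIG_CPU_BIG_ENDIAN 1    /* Kconfig-style: defined to 1   */
    #define DEMO__BIG_ENDIAN 4321           /* byteorder-style: defined 4321 */

    int main(void)
    {
        printf("IS_ENABLED(DEMO_CONFIG_CPU_BIG_ENDIAN) = %d\n",
               IS_ENABLED_DEMO(DEMO_CONFIG_CPU_BIG_ENDIAN));
        printf("IS_ENABLED(DEMO__BIG_ENDIAN)           = %d\n",
               IS_ENABLED_DEMO(DEMO__BIG_ENDIAN));
        printf("IS_ENABLED(DEMO_UNDEFINED)             = %d\n",
               IS_ENABLED_DEMO(DEMO_UNDEFINED));
        return 0;
    }
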
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index e83fc3d0da9c..19bcb80b2031 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -694,11 +694,11 @@ static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
694 * RETURNS: 694 * RETURNS:
695 * Block address read from @tf. 695 * Block address read from @tf.
696 */ 696 */
697u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev) 697u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
698{ 698{
699 u64 block = 0; 699 u64 block = 0;
700 700
701 if (!dev || tf->flags & ATA_TFLAG_LBA) { 701 if (tf->flags & ATA_TFLAG_LBA) {
702 if (tf->flags & ATA_TFLAG_LBA48) { 702 if (tf->flags & ATA_TFLAG_LBA48) {
703 block |= (u64)tf->hob_lbah << 40; 703 block |= (u64)tf->hob_lbah << 40;
704 block |= (u64)tf->hob_lbam << 32; 704 block |= (u64)tf->hob_lbam << 32;
@@ -2147,24 +2147,6 @@ static int ata_dev_config_ncq(struct ata_device *dev,
2147 return 0; 2147 return 0;
2148} 2148}
2149 2149
2150static void ata_dev_config_sense_reporting(struct ata_device *dev)
2151{
2152 unsigned int err_mask;
2153
2154 if (!ata_id_has_sense_reporting(dev->id))
2155 return;
2156
2157 if (ata_id_sense_reporting_enabled(dev->id))
2158 return;
2159
2160 err_mask = ata_dev_set_feature(dev, SETFEATURE_SENSE_DATA, 0x1);
2161 if (err_mask) {
2162 ata_dev_dbg(dev,
2163 "failed to enable Sense Data Reporting, Emask 0x%x\n",
2164 err_mask);
2165 }
2166}
2167
2168/** 2150/**
2169 * ata_dev_configure - Configure the specified ATA/ATAPI device 2151 * ata_dev_configure - Configure the specified ATA/ATAPI device
2170 * @dev: Target device to configure 2152 * @dev: Target device to configure
@@ -2387,7 +2369,7 @@ int ata_dev_configure(struct ata_device *dev)
2387 dev->devslp_timing[i] = sata_setting[j]; 2369 dev->devslp_timing[i] = sata_setting[j];
2388 } 2370 }
2389 } 2371 }
2390 ata_dev_config_sense_reporting(dev); 2372
2391 dev->cdb_len = 16; 2373 dev->cdb_len = 16;
2392 } 2374 }
2393 2375
@@ -2478,6 +2460,10 @@ int ata_dev_configure(struct ata_device *dev)
2478 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128, 2460 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2479 dev->max_sectors); 2461 dev->max_sectors);
2480 2462
2463 if (dev->horkage & ATA_HORKAGE_MAX_SEC_1024)
2464 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_1024,
2465 dev->max_sectors);
2466
2481 if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48) 2467 if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
2482 dev->max_sectors = ATA_MAX_SECTORS_LBA48; 2468 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2483 2469
@@ -4146,6 +4132,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4146 { "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 }, 4132 { "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
4147 { "Slimtype DVD A DS8A9SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 }, 4133 { "Slimtype DVD A DS8A9SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
4148 4134
4135 /*
4136 * Causes silent data corruption with higher max sects.
4137 * http://lkml.kernel.org/g/x49wpy40ysk.fsf@segfault.boston.devel.redhat.com
4138 */
4139 { "ST380013AS", "3.20", ATA_HORKAGE_MAX_SEC_1024 },
4140
4149 /* Devices we expect to fail diagnostics */ 4141 /* Devices we expect to fail diagnostics */
4150 4142
4151 /* Devices where NCQ should be avoided */ 4143 /* Devices where NCQ should be avoided */
@@ -4174,9 +4166,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4174 { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | 4166 { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
4175 ATA_HORKAGE_FIRMWARE_WARN }, 4167 ATA_HORKAGE_FIRMWARE_WARN },
4176 4168
4177 /* Seagate Momentus SpinPoint M8 seem to have FPMDA_AA issues */ 4169 /* drives which fail FPDMA_AA activation (some may freeze afterwards) */
4178 { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA }, 4170 { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA },
4179 { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA }, 4171 { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA },
4172 { "VB0250EAVER", "HPG7", ATA_HORKAGE_BROKEN_FPDMA_AA },
4180 4173
4181 /* Blacklist entries taken from Silicon Image 3124/3132 4174 /* Blacklist entries taken from Silicon Image 3124/3132
4182 Windows driver .inf file - also several Linux problem reports */ 4175 Windows driver .inf file - also several Linux problem reports */
@@ -4229,7 +4222,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4229 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4222 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4230 { "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | 4223 { "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4231 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4224 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4232 { "Micron_M5[15]0*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | 4225 { "Micron_M5[15]0_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
4233 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4226 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4234 { "Crucial_CT*M550*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | 4227 { "Crucial_CT*M550*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
4235 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4228 ATA_HORKAGE_ZERO_AFTER_TRIM, },
@@ -4238,6 +4231,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4238 { "Samsung SSD 8*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | 4231 { "Samsung SSD 8*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4239 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4232 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4240 4233
4234 /* devices that don't properly handle TRIM commands */
4235 { "SuperSSpeed S238*", NULL, ATA_HORKAGE_NOTRIM, },
4236
4241 /* 4237 /*
4242 * As defined, the DRAT (Deterministic Read After Trim) and RZAT 4238 * As defined, the DRAT (Deterministic Read After Trim) and RZAT
4243 * (Return Zero After Trim) flags in the ATA Command Set are 4239 * (Return Zero After Trim) flags in the ATA Command Set are
@@ -4501,7 +4497,8 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4501 else /* In the ancient relic department - skip all of this */ 4497 else /* In the ancient relic department - skip all of this */
4502 return 0; 4498 return 0;
4503 4499
4504 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 4500 /* On some disks, this command causes spin-up, so we need longer timeout */
4501 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000);
4505 4502
4506 DPRINTK("EXIT, err_mask=%x\n", err_mask); 4503 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4507 return err_mask; 4504 return err_mask;
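
Among the libata-core changes above, ATA_HORKAGE_MAX_SEC_1024 is a new blacklist flag that clamps max_sectors for a drive known to corrupt data with larger transfers. The userspace sketch below shows only the clamp-with-min() stacking of such quirk flags; the DEMO_* flag names and the 65535 default are illustrative.

    #include <stdio.h>

    #define DEMO_HORKAGE_MAX_SEC_128   (1u << 0)
    #define DEMO_HORKAGE_MAX_SEC_1024  (1u << 1)

    static unsigned int min_u(unsigned int a, unsigned int b)
    {
        return a < b ? a : b;
    }

    /* Each quirk caps the limit; min() keeps the smallest cap when quirks stack. */
    static unsigned int apply_horkage(unsigned int max_sectors, unsigned int horkage)
    {
        if (horkage & DEMO_HORKAGE_MAX_SEC_128)
            max_sectors = min_u(128, max_sectors);
        if (horkage & DEMO_HORKAGE_MAX_SEC_1024)
            max_sectors = min_u(1024, max_sectors);
        return max_sectors;
    }

    int main(void)
    {
        printf("default : %u\n", apply_horkage(65535, 0));
        printf("SEC_1024: %u\n", apply_horkage(65535, DEMO_HORKAGE_MAX_SEC_1024));
        printf("both    : %u\n", apply_horkage(65535,
               DEMO_HORKAGE_MAX_SEC_128 | DEMO_HORKAGE_MAX_SEC_1024));
        return 0;
    }
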
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 7465031a893c..cb0508af1459 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1592,8 +1592,6 @@ static int ata_eh_read_log_10h(struct ata_device *dev,
1592 tf->hob_lbah = buf[10]; 1592 tf->hob_lbah = buf[10];
1593 tf->nsect = buf[12]; 1593 tf->nsect = buf[12];
1594 tf->hob_nsect = buf[13]; 1594 tf->hob_nsect = buf[13];
1595 if (ata_id_has_ncq_autosense(dev->id))
1596 tf->auxiliary = buf[14] << 16 | buf[15] << 8 | buf[16];
1597 1595
1598 return 0; 1596 return 0;
1599} 1597}
@@ -1630,70 +1628,6 @@ unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
1630} 1628}
1631 1629
1632/** 1630/**
1633 * ata_eh_request_sense - perform REQUEST_SENSE_DATA_EXT
1634 * @dev: device to perform REQUEST_SENSE_SENSE_DATA_EXT to
1635 * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
1636 * @dfl_sense_key: default sense key to use
1637 *
1638 * Perform REQUEST_SENSE_DATA_EXT after the device reported CHECK
1639 * SENSE. This function is EH helper.
1640 *
1641 * LOCKING:
1642 * Kernel thread context (may sleep).
1643 *
1644 * RETURNS:
1645 * encoded sense data on success, 0 on failure or if sense data
1646 * is not available.
1647 */
1648static u32 ata_eh_request_sense(struct ata_queued_cmd *qc,
1649 struct scsi_cmnd *cmd)
1650{
1651 struct ata_device *dev = qc->dev;
1652 struct ata_taskfile tf;
1653 unsigned int err_mask;
1654
1655 if (!cmd)
1656 return 0;
1657
1658 DPRINTK("ATA request sense\n");
1659 ata_dev_warn(dev, "request sense\n");
1660 if (!ata_id_sense_reporting_enabled(dev->id)) {
1661 ata_dev_warn(qc->dev, "sense data reporting disabled\n");
1662 return 0;
1663 }
1664 ata_tf_init(dev, &tf);
1665
1666 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1667 tf.flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
1668 tf.command = ATA_CMD_REQ_SENSE_DATA;
1669 tf.protocol = ATA_PROT_NODATA;
1670
1671 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1672 /*
1673 * ACS-4 states:
1674 * The device may set the SENSE DATA AVAILABLE bit to one in the
1675 * STATUS field and clear the ERROR bit to zero in the STATUS field
1676 * to indicate that the command returned completion without an error
1677 * and the sense data described in table 306 is available.
1678 *
1679 * IOW the 'ATA_SENSE' bit might not be set even though valid
1680 * sense data is available.
1681 * So check for both.
1682 */
1683 if ((tf.command & ATA_SENSE) ||
1684 tf.lbah != 0 || tf.lbam != 0 || tf.lbal != 0) {
1685 ata_scsi_set_sense(cmd, tf.lbah, tf.lbam, tf.lbal);
1686 qc->flags |= ATA_QCFLAG_SENSE_VALID;
1687 ata_dev_warn(dev, "sense data %02x/%02x/%02x\n",
1688 tf.lbah, tf.lbam, tf.lbal);
1689 } else {
1690 ata_dev_warn(dev, "request sense failed stat %02x emask %x\n",
1691 tf.command, err_mask);
1692 }
1693 return err_mask;
1694}
1695
1696/**
1697 * atapi_eh_request_sense - perform ATAPI REQUEST_SENSE 1631 * atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
1698 * @dev: device to perform REQUEST_SENSE to 1632 * @dev: device to perform REQUEST_SENSE to
1699 * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long) 1633 * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
@@ -1855,19 +1789,6 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
1855 memcpy(&qc->result_tf, &tf, sizeof(tf)); 1789 memcpy(&qc->result_tf, &tf, sizeof(tf));
1856 qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48; 1790 qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
1857 qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ; 1791 qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
1858 if (qc->result_tf.auxiliary) {
1859 char sense_key, asc, ascq;
1860
1861 sense_key = (qc->result_tf.auxiliary >> 16) & 0xff;
1862 asc = (qc->result_tf.auxiliary >> 8) & 0xff;
1863 ascq = qc->result_tf.auxiliary & 0xff;
1864 ata_dev_dbg(dev, "NCQ Autosense %02x/%02x/%02x\n",
1865 sense_key, asc, ascq);
1866 ata_scsi_set_sense(qc->scsicmd, sense_key, asc, ascq);
1867 ata_scsi_set_sense_information(qc->scsicmd, &qc->result_tf);
1868 qc->flags |= ATA_QCFLAG_SENSE_VALID;
1869 }
1870
1871 ehc->i.err_mask &= ~AC_ERR_DEV; 1792 ehc->i.err_mask &= ~AC_ERR_DEV;
1872} 1793}
1873 1794
@@ -1897,27 +1818,6 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
1897 return ATA_EH_RESET; 1818 return ATA_EH_RESET;
1898 } 1819 }
1899 1820
1900 /*
1901 * Sense data reporting does not work if the
1902 * device fault bit is set.
1903 */
1904 if ((stat & ATA_SENSE) && !(stat & ATA_DF) &&
1905 !(qc->flags & ATA_QCFLAG_SENSE_VALID)) {
1906 if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
1907 tmp = ata_eh_request_sense(qc, qc->scsicmd);
1908 if (tmp)
1909 qc->err_mask |= tmp;
1910 else
1911 ata_scsi_set_sense_information(qc->scsicmd, tf);
1912 } else {
1913 ata_dev_warn(qc->dev, "sense data available but port frozen\n");
1914 }
1915 }
1916
1917 /* Set by NCQ autosense or request sense above */
1918 if (qc->flags & ATA_QCFLAG_SENSE_VALID)
1919 return 0;
1920
1921 if (stat & (ATA_ERR | ATA_DF)) 1821 if (stat & (ATA_ERR | ATA_DF))
1922 qc->err_mask |= AC_ERR_DEV; 1822 qc->err_mask |= AC_ERR_DEV;
1923 else 1823 else
@@ -2661,15 +2561,14 @@ static void ata_eh_link_report(struct ata_link *link)
2661 2561
2662#ifdef CONFIG_ATA_VERBOSE_ERROR 2562#ifdef CONFIG_ATA_VERBOSE_ERROR
2663 if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | 2563 if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
2664 ATA_SENSE | ATA_ERR)) { 2564 ATA_ERR)) {
2665 if (res->command & ATA_BUSY) 2565 if (res->command & ATA_BUSY)
2666 ata_dev_err(qc->dev, "status: { Busy }\n"); 2566 ata_dev_err(qc->dev, "status: { Busy }\n");
2667 else 2567 else
2668 ata_dev_err(qc->dev, "status: { %s%s%s%s%s}\n", 2568 ata_dev_err(qc->dev, "status: { %s%s%s%s}\n",
2669 res->command & ATA_DRDY ? "DRDY " : "", 2569 res->command & ATA_DRDY ? "DRDY " : "",
2670 res->command & ATA_DF ? "DF " : "", 2570 res->command & ATA_DF ? "DF " : "",
2671 res->command & ATA_DRQ ? "DRQ " : "", 2571 res->command & ATA_DRQ ? "DRQ " : "",
2672 res->command & ATA_SENSE ? "SENSE " : "",
2673 res->command & ATA_ERR ? "ERR " : ""); 2572 res->command & ATA_ERR ? "ERR " : "");
2674 } 2573 }
2675 2574
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index 7ccc084bf1df..85aa76116a30 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -460,6 +460,13 @@ static void sata_pmp_quirks(struct ata_port *ap)
460 ATA_LFLAG_NO_SRST | 460 ATA_LFLAG_NO_SRST |
461 ATA_LFLAG_ASSUME_ATA; 461 ATA_LFLAG_ASSUME_ATA;
462 } 462 }
463 } else if (vendor == 0x11ab && devid == 0x4140) {
464 /* Marvell 4140 quirks */
465 ata_for_each_link(link, ap, EDGE) {
466 /* port 4 is for SEMB device and it doesn't like SRST */
467 if (link->pmp == 4)
468 link->flags |= ATA_LFLAG_DISABLED;
469 }
463 } 470 }
464} 471}
465 472
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 3131adcc1f87..0d7f0da3a269 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -270,28 +270,13 @@ DEVICE_ATTR(unload_heads, S_IRUGO | S_IWUSR,
270 ata_scsi_park_show, ata_scsi_park_store); 270 ata_scsi_park_show, ata_scsi_park_store);
271EXPORT_SYMBOL_GPL(dev_attr_unload_heads); 271EXPORT_SYMBOL_GPL(dev_attr_unload_heads);
272 272
273void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq) 273static void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
274{ 274{
275 if (!cmd)
276 return;
277
278 cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION; 275 cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
279 276
280 scsi_build_sense_buffer(0, cmd->sense_buffer, sk, asc, ascq); 277 scsi_build_sense_buffer(0, cmd->sense_buffer, sk, asc, ascq);
281} 278}
282 279
283void ata_scsi_set_sense_information(struct scsi_cmnd *cmd,
284 const struct ata_taskfile *tf)
285{
286 u64 information;
287
288 if (!cmd)
289 return;
290
291 information = ata_tf_read_block(tf, NULL);
292 scsi_set_sense_information(cmd->sense_buffer, information);
293}
294
295static ssize_t 280static ssize_t
296ata_scsi_em_message_store(struct device *dev, struct device_attribute *attr, 281ata_scsi_em_message_store(struct device *dev, struct device_attribute *attr,
297 const char *buf, size_t count) 282 const char *buf, size_t count)
@@ -1792,9 +1777,7 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
1792 ((cdb[2] & 0x20) || need_sense)) { 1777 ((cdb[2] & 0x20) || need_sense)) {
1793 ata_gen_passthru_sense(qc); 1778 ata_gen_passthru_sense(qc);
1794 } else { 1779 } else {
1795 if (qc->flags & ATA_QCFLAG_SENSE_VALID) { 1780 if (!need_sense) {
1796 cmd->result = SAM_STAT_CHECK_CONDITION;
1797 } else if (!need_sense) {
1798 cmd->result = SAM_STAT_GOOD; 1781 cmd->result = SAM_STAT_GOOD;
1799 } else { 1782 } else {
1800 /* TODO: decide which descriptor format to use 1783 /* TODO: decide which descriptor format to use
@@ -2568,7 +2551,8 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
2568 rbuf[14] = (lowest_aligned >> 8) & 0x3f; 2551 rbuf[14] = (lowest_aligned >> 8) & 0x3f;
2569 rbuf[15] = lowest_aligned; 2552 rbuf[15] = lowest_aligned;
2570 2553
2571 if (ata_id_has_trim(args->id)) { 2554 if (ata_id_has_trim(args->id) &&
2555 !(dev->horkage & ATA_HORKAGE_NOTRIM)) {
2572 rbuf[14] |= 0x80; /* LBPME */ 2556 rbuf[14] |= 0x80; /* LBPME */
2573 2557
2574 if (ata_id_has_zero_after_trim(args->id) && 2558 if (ata_id_has_zero_after_trim(args->id) &&
diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
index d6c37bcd416d..e2d94972962d 100644
--- a/drivers/ata/libata-transport.c
+++ b/drivers/ata/libata-transport.c
@@ -569,6 +569,8 @@ show_ata_dev_trim(struct device *dev,
569 569
570 if (!ata_id_has_trim(ata_dev->id)) 570 if (!ata_id_has_trim(ata_dev->id))
571 mode = "unsupported"; 571 mode = "unsupported";
572 else if (ata_dev->horkage & ATA_HORKAGE_NOTRIM)
573 mode = "forced_unsupported";
572 else if (ata_dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) 574 else if (ata_dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM)
573 mode = "forced_unqueued"; 575 mode = "forced_unqueued";
574 else if (ata_fpdma_dsm_supported(ata_dev)) 576 else if (ata_fpdma_dsm_supported(ata_dev))
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index a998a175f9f1..f840ca18a7c0 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -67,8 +67,7 @@ extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag);
67extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev, 67extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
68 u64 block, u32 n_block, unsigned int tf_flags, 68 u64 block, u32 n_block, unsigned int tf_flags,
69 unsigned int tag); 69 unsigned int tag);
70extern u64 ata_tf_read_block(const struct ata_taskfile *tf, 70extern u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev);
71 struct ata_device *dev);
72extern unsigned ata_exec_internal(struct ata_device *dev, 71extern unsigned ata_exec_internal(struct ata_device *dev,
73 struct ata_taskfile *tf, const u8 *cdb, 72 struct ata_taskfile *tf, const u8 *cdb,
74 int dma_dir, void *buf, unsigned int buflen, 73 int dma_dir, void *buf, unsigned int buflen,
@@ -138,9 +137,6 @@ extern int ata_scsi_add_hosts(struct ata_host *host,
138 struct scsi_host_template *sht); 137 struct scsi_host_template *sht);
139extern void ata_scsi_scan_host(struct ata_port *ap, int sync); 138extern void ata_scsi_scan_host(struct ata_port *ap, int sync);
140extern int ata_scsi_offline_dev(struct ata_device *dev); 139extern int ata_scsi_offline_dev(struct ata_device *dev);
141extern void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq);
142extern void ata_scsi_set_sense_information(struct scsi_cmnd *cmd,
143 const struct ata_taskfile *tf);
144extern void ata_scsi_media_change_notify(struct ata_device *dev); 140extern void ata_scsi_media_change_notify(struct ata_device *dev);
145extern void ata_scsi_hotplug(struct work_struct *work); 141extern void ata_scsi_hotplug(struct work_struct *work);
146extern void ata_schedule_scsi_eh(struct Scsi_Host *shost); 142extern void ata_schedule_scsi_eh(struct Scsi_Host *shost);
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
index a9b0c820f2eb..5d9ee99c2148 100644
--- a/drivers/ata/pata_arasan_cf.c
+++ b/drivers/ata/pata_arasan_cf.c
@@ -4,7 +4,7 @@
4 * Arasan Compact Flash host controller source file 4 * Arasan Compact Flash host controller source file
5 * 5 *
6 * Copyright (C) 2011 ST Microelectronics 6 * Copyright (C) 2011 ST Microelectronics
7 * Viresh Kumar <viresh.linux@gmail.com> 7 * Viresh Kumar <vireshk@kernel.org>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
@@ -968,7 +968,7 @@ static struct platform_driver arasan_cf_driver = {
968 968
969module_platform_driver(arasan_cf_driver); 969module_platform_driver(arasan_cf_driver);
970 970
971MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>"); 971MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");
972MODULE_DESCRIPTION("Arasan ATA Compact Flash driver"); 972MODULE_DESCRIPTION("Arasan ATA Compact Flash driver");
973MODULE_LICENSE("GPL"); 973MODULE_LICENSE("GPL");
974MODULE_ALIAS("platform:" DRIVER_NAME); 974MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
index 3a18a8a719b4..fab504fd9cfd 100644
--- a/drivers/ata/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -1238,8 +1238,12 @@ static unsigned int pdc20621_prog_dimm_global(struct ata_host *host)
1238 readl(mmio + PDC_SDRAM_CONTROL); 1238 readl(mmio + PDC_SDRAM_CONTROL);
1239 1239
1240 /* Turn on for ECC */ 1240 /* Turn on for ECC */
1241 pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, 1241 if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,

1242 PDC_DIMM_SPD_TYPE, &spd0); 1242 PDC_DIMM_SPD_TYPE, &spd0)) {
1243 pr_err("Failed in i2c read: device=%#x, subaddr=%#x\n",
1244 PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
1245 return 1;
1246 }
1243 if (spd0 == 0x02) { 1247 if (spd0 == 0x02) {
1244 data |= (0x01 << 16); 1248 data |= (0x01 << 16);
1245 writel(data, mmio + PDC_SDRAM_CONTROL); 1249 writel(data, mmio + PDC_SDRAM_CONTROL);
@@ -1380,8 +1384,12 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host)
1380 1384
1381 /* ECC initialization. */ 1385 /* ECC initialization. */
1382 1386
1383 pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, 1387 if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1384 PDC_DIMM_SPD_TYPE, &spd0); 1388 PDC_DIMM_SPD_TYPE, &spd0)) {
1389 pr_err("Failed in i2c read: device=%#x, subaddr=%#x\n",
1390 PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
1391 return 1;
1392 }
1385 if (spd0 == 0x02) { 1393 if (spd0 == 0x02) {
1386 void *buf; 1394 void *buf;
1387 VPRINTK("Start ECC initialization\n"); 1395 VPRINTK("Start ECC initialization\n");
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index 81751a49d8bf..56486d92c4e7 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -296,11 +296,20 @@ static int regcache_rbtree_insert_to_block(struct regmap *map,
296 if (!blk) 296 if (!blk)
297 return -ENOMEM; 297 return -ENOMEM;
298 298
299 present = krealloc(rbnode->cache_present, 299 if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) {
300 BITS_TO_LONGS(blklen) * sizeof(*present), GFP_KERNEL); 300 present = krealloc(rbnode->cache_present,
301 if (!present) { 301 BITS_TO_LONGS(blklen) * sizeof(*present),
302 kfree(blk); 302 GFP_KERNEL);
303 return -ENOMEM; 303 if (!present) {
304 kfree(blk);
305 return -ENOMEM;
306 }
307
308 memset(present + BITS_TO_LONGS(rbnode->blklen), 0,
309 (BITS_TO_LONGS(blklen) - BITS_TO_LONGS(rbnode->blklen))
310 * sizeof(*present));
311 } else {
312 present = rbnode->cache_present;
304 } 313 }
305 314
306 /* insert the register value in the correct place in the rbnode block */ 315 /* insert the register value in the correct place in the rbnode block */
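
The regcache-rbtree hunk above avoids krealloc() when the cache_present bitmap is not growing, and when it does grow it zeroes only the newly added longs, since krealloc() leaves the tail uninitialised. A userspace sketch of the same grow-and-clear-the-tail pattern; grow_present() and the realloc()/memset() pair stand in for the kernel calls.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <limits.h>

    #define BITS_TO_LONGS(n) \
        (((n) + CHAR_BIT * sizeof(long) - 1) / (CHAR_BIT * sizeof(long)))

    static unsigned long *grow_present(unsigned long *present,
                                       size_t old_bits, size_t new_bits)
    {
        size_t old_longs = BITS_TO_LONGS(old_bits);
        size_t new_longs = BITS_TO_LONGS(new_bits);
        unsigned long *p;

        if (new_longs <= old_longs)
            return present;         /* nothing to grow, keep the old bitmap */

        p = realloc(present, new_longs * sizeof(*p));
        if (!p)
            return NULL;

        /* Clear only the freshly added words; realloc() does not. */
        memset(p + old_longs, 0, (new_longs - old_longs) * sizeof(*p));
        return p;
    }

    int main(void)
    {
        unsigned long *present = calloc(BITS_TO_LONGS(64), sizeof(*present));

        present = grow_present(present, 64, 256);
        if (!present)
            return 1;
        printf("grown bitmap word[3] = %lu (expected 0)\n", present[3]);
        free(present);
        return 0;
    }
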
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 69de41a87b74..3177b245d2bd 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -240,19 +240,19 @@ static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
240 while ((entry = llist_del_all(&cq->list)) != NULL) { 240 while ((entry = llist_del_all(&cq->list)) != NULL) {
241 entry = llist_reverse_order(entry); 241 entry = llist_reverse_order(entry);
242 do { 242 do {
243 struct request_queue *q = NULL;
244
243 cmd = container_of(entry, struct nullb_cmd, ll_list); 245 cmd = container_of(entry, struct nullb_cmd, ll_list);
244 entry = entry->next; 246 entry = entry->next;
247 if (cmd->rq)
248 q = cmd->rq->q;
245 end_cmd(cmd); 249 end_cmd(cmd);
246 250
247 if (cmd->rq) { 251 if (q && !q->mq_ops && blk_queue_stopped(q)) {
248 struct request_queue *q = cmd->rq->q; 252 spin_lock(q->queue_lock);
249 253 if (blk_queue_stopped(q))
250 if (!q->mq_ops && blk_queue_stopped(q)) { 254 blk_start_queue(q);
251 spin_lock(q->queue_lock); 255 spin_unlock(q->queue_lock);
252 if (blk_queue_stopped(q))
253 blk_start_queue(q);
254 spin_unlock(q->queue_lock);
255 }
256 } 256 }
257 } while (entry); 257 } while (entry);
258 } 258 }
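
The null_blk hunk above is an ordering fix: end_cmd() completes (and can free) the command, so the queue pointer reached through cmd->rq has to be captured first and the restart logic must use that saved pointer. A tiny userspace sketch of the capture-before-free shape; the struct names and end_cmd() here are illustrative, not the driver's.

    #include <stdio.h>
    #include <stdlib.h>

    struct queue { int stopped; };
    struct request { struct queue *q; };
    struct cmd { struct request *rq; };

    static void end_cmd(struct cmd *c)
    {
        free(c->rq);
        free(c);                /* after this, c must not be dereferenced */
    }

    int main(void)
    {
        struct queue qu = { .stopped = 1 };
        struct request *rq = malloc(sizeof(*rq));
        struct cmd *c = malloc(sizeof(*c));
        struct queue *q = NULL;

        if (!rq || !c)
            return 1;
        rq->q = &qu;
        c->rq = rq;

        if (c->rq)              /* capture before completion ...            */
            q = c->rq->q;
        end_cmd(c);             /* ... because this frees both cmd and rq   */

        if (q && q->stopped)
            printf("restarting stopped queue\n");
        return 0;
    }
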
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index d1d6141920d3..7920c2741b47 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -2108,8 +2108,17 @@ static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid)
2108 goto out_free_disk; 2108 goto out_free_disk;
2109 2109
2110 add_disk(ns->disk); 2110 add_disk(ns->disk);
2111 if (ns->ms) 2111 if (ns->ms) {
2112 revalidate_disk(ns->disk); 2112 struct block_device *bd = bdget_disk(ns->disk, 0);
2113 if (!bd)
2114 return;
2115 if (blkdev_get(bd, FMODE_READ, NULL)) {
2116 bdput(bd);
2117 return;
2118 }
2119 blkdev_reread_part(bd);
2120 blkdev_put(bd, FMODE_READ);
2121 }
2113 return; 2122 return;
2114 out_free_disk: 2123 out_free_disk:
2115 kfree(disk); 2124 kfree(disk);
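
The nvme hunk above replaces revalidate_disk() with an explicit open of the whole-disk block device followed by blkdev_reread_part(), i.e. a partition-table re-read. The sketch below does the rough userspace equivalent through the BLKRRPART ioctl; the /dev/nvme0n1 path is only an example.

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/fs.h>

    int main(int argc, char **argv)
    {
        const char *dev = argc > 1 ? argv[1] : "/dev/nvme0n1";  /* example path */
        int fd = open(dev, O_RDONLY);

        if (fd < 0) {
            perror("open");
            return 1;
        }
        if (ioctl(fd, BLKRRPART) < 0)       /* ask the kernel to re-read partitions */
            perror("BLKRRPART");
        else
            printf("%s: partition table re-read\n", dev);
        close(fd);
        return 0;
    }
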
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index d94529d5c8e9..bc67a93aa4f4 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -523,6 +523,7 @@ void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
523# define rbd_assert(expr) ((void) 0) 523# define rbd_assert(expr) ((void) 0)
524#endif /* !RBD_DEBUG */ 524#endif /* !RBD_DEBUG */
525 525
526static void rbd_osd_copyup_callback(struct rbd_obj_request *obj_request);
526static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request); 527static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
527static void rbd_img_parent_read(struct rbd_obj_request *obj_request); 528static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
528static void rbd_dev_remove_parent(struct rbd_device *rbd_dev); 529static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
@@ -1818,6 +1819,16 @@ static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
1818 obj_request_done_set(obj_request); 1819 obj_request_done_set(obj_request);
1819} 1820}
1820 1821
1822static void rbd_osd_call_callback(struct rbd_obj_request *obj_request)
1823{
1824 dout("%s: obj %p\n", __func__, obj_request);
1825
1826 if (obj_request_img_data_test(obj_request))
1827 rbd_osd_copyup_callback(obj_request);
1828 else
1829 obj_request_done_set(obj_request);
1830}
1831
1821static void rbd_osd_req_callback(struct ceph_osd_request *osd_req, 1832static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
1822 struct ceph_msg *msg) 1833 struct ceph_msg *msg)
1823{ 1834{
@@ -1866,6 +1877,8 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
1866 rbd_osd_discard_callback(obj_request); 1877 rbd_osd_discard_callback(obj_request);
1867 break; 1878 break;
1868 case CEPH_OSD_OP_CALL: 1879 case CEPH_OSD_OP_CALL:
1880 rbd_osd_call_callback(obj_request);
1881 break;
1869 case CEPH_OSD_OP_NOTIFY_ACK: 1882 case CEPH_OSD_OP_NOTIFY_ACK:
1870 case CEPH_OSD_OP_WATCH: 1883 case CEPH_OSD_OP_WATCH:
1871 rbd_osd_trivial_callback(obj_request); 1884 rbd_osd_trivial_callback(obj_request);
@@ -2530,13 +2543,15 @@ out_unwind:
2530} 2543}
2531 2544
2532static void 2545static void
2533rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request) 2546rbd_osd_copyup_callback(struct rbd_obj_request *obj_request)
2534{ 2547{
2535 struct rbd_img_request *img_request; 2548 struct rbd_img_request *img_request;
2536 struct rbd_device *rbd_dev; 2549 struct rbd_device *rbd_dev;
2537 struct page **pages; 2550 struct page **pages;
2538 u32 page_count; 2551 u32 page_count;
2539 2552
2553 dout("%s: obj %p\n", __func__, obj_request);
2554
2540 rbd_assert(obj_request->type == OBJ_REQUEST_BIO || 2555 rbd_assert(obj_request->type == OBJ_REQUEST_BIO ||
2541 obj_request->type == OBJ_REQUEST_NODATA); 2556 obj_request->type == OBJ_REQUEST_NODATA);
2542 rbd_assert(obj_request_img_data_test(obj_request)); 2557 rbd_assert(obj_request_img_data_test(obj_request));
@@ -2563,9 +2578,7 @@ rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
2563 if (!obj_request->result) 2578 if (!obj_request->result)
2564 obj_request->xferred = obj_request->length; 2579 obj_request->xferred = obj_request->length;
2565 2580
2566 /* Finish up with the normal image object callback */ 2581 obj_request_done_set(obj_request);
2567
2568 rbd_img_obj_callback(obj_request);
2569} 2582}
2570 2583
2571static void 2584static void
@@ -2650,7 +2663,6 @@ rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
2650 2663
2651 /* All set, send it off. */ 2664 /* All set, send it off. */
2652 2665
2653 orig_request->callback = rbd_img_obj_copyup_callback;
2654 osdc = &rbd_dev->rbd_client->client->osdc; 2666 osdc = &rbd_dev->rbd_client->client->osdc;
2655 img_result = rbd_obj_request_submit(osdc, orig_request); 2667 img_result = rbd_obj_request_submit(osdc, orig_request);
2656 if (!img_result) 2668 if (!img_result)
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index ced96777b677..954c0029fb3b 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -369,8 +369,8 @@ static void purge_persistent_gnt(struct xen_blkif *blkif)
369 return; 369 return;
370 } 370 }
371 371
372 if (work_pending(&blkif->persistent_purge_work)) { 372 if (work_busy(&blkif->persistent_purge_work)) {
373 pr_alert_ratelimited("Scheduled work from previous purge is still pending, cannot purge list\n"); 373 pr_alert_ratelimited("Scheduled work from previous purge is still busy, cannot purge list\n");
374 return; 374 return;
375 } 375 }
376 376
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 6d89ed35d80c..7a8a73f1fc04 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -179,6 +179,7 @@ static DEFINE_SPINLOCK(minor_lock);
179 ((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME) 179 ((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
180 180
181static int blkfront_setup_indirect(struct blkfront_info *info); 181static int blkfront_setup_indirect(struct blkfront_info *info);
182static int blkfront_gather_backend_features(struct blkfront_info *info);
182 183
183static int get_id_from_freelist(struct blkfront_info *info) 184static int get_id_from_freelist(struct blkfront_info *info)
184{ 185{
@@ -1128,8 +1129,10 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
1128 * Add the used indirect page back to the list of 1129 * Add the used indirect page back to the list of
1129 * available pages for indirect grefs. 1130 * available pages for indirect grefs.
1130 */ 1131 */
1131 indirect_page = pfn_to_page(s->indirect_grants[i]->pfn); 1132 if (!info->feature_persistent) {
1132 list_add(&indirect_page->lru, &info->indirect_pages); 1133 indirect_page = pfn_to_page(s->indirect_grants[i]->pfn);
1134 list_add(&indirect_page->lru, &info->indirect_pages);
1135 }
1133 s->indirect_grants[i]->gref = GRANT_INVALID_REF; 1136 s->indirect_grants[i]->gref = GRANT_INVALID_REF;
1134 list_add_tail(&s->indirect_grants[i]->node, &info->grants); 1137 list_add_tail(&s->indirect_grants[i]->node, &info->grants);
1135 } 1138 }
@@ -1519,7 +1522,7 @@ static int blkif_recover(struct blkfront_info *info)
1519 info->shadow_free = info->ring.req_prod_pvt; 1522 info->shadow_free = info->ring.req_prod_pvt;
1520 info->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff; 1523 info->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff;
1521 1524
1522 rc = blkfront_setup_indirect(info); 1525 rc = blkfront_gather_backend_features(info);
1523 if (rc) { 1526 if (rc) {
1524 kfree(copy); 1527 kfree(copy);
1525 return rc; 1528 return rc;
@@ -1720,20 +1723,13 @@ static void blkfront_setup_discard(struct blkfront_info *info)
1720 1723
1721static int blkfront_setup_indirect(struct blkfront_info *info) 1724static int blkfront_setup_indirect(struct blkfront_info *info)
1722{ 1725{
1723 unsigned int indirect_segments, segs; 1726 unsigned int segs;
1724 int err, i; 1727 int err, i;
1725 1728
1726 err = xenbus_gather(XBT_NIL, info->xbdev->otherend, 1729 if (info->max_indirect_segments == 0)
1727 "feature-max-indirect-segments", "%u", &indirect_segments,
1728 NULL);
1729 if (err) {
1730 info->max_indirect_segments = 0;
1731 segs = BLKIF_MAX_SEGMENTS_PER_REQUEST; 1730 segs = BLKIF_MAX_SEGMENTS_PER_REQUEST;
1732 } else { 1731 else
1733 info->max_indirect_segments = min(indirect_segments,
1734 xen_blkif_max_segments);
1735 segs = info->max_indirect_segments; 1732 segs = info->max_indirect_segments;
1736 }
1737 1733
1738 err = fill_grant_buffer(info, (segs + INDIRECT_GREFS(segs)) * BLK_RING_SIZE(info)); 1734 err = fill_grant_buffer(info, (segs + INDIRECT_GREFS(segs)) * BLK_RING_SIZE(info));
1739 if (err) 1735 if (err)
@@ -1797,6 +1793,68 @@ out_of_memory:
1797} 1793}
1798 1794
1799/* 1795/*
1796 * Gather all backend feature-*
1797 */
1798static int blkfront_gather_backend_features(struct blkfront_info *info)
1799{
1800 int err;
1801 int barrier, flush, discard, persistent;
1802 unsigned int indirect_segments;
1803
1804 info->feature_flush = 0;
1805
1806 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
1807 "feature-barrier", "%d", &barrier,
1808 NULL);
1809
1810 /*
1811 * If there's no "feature-barrier" defined, then it means
1812 * we're dealing with a very old backend which writes
1813 * synchronously; nothing to do.
1814 *
1815 * If there are barriers, then we use flush.
1816 */
1817 if (!err && barrier)
1818 info->feature_flush = REQ_FLUSH | REQ_FUA;
1819 /*
1820 * And if there is "feature-flush-cache" use that above
1821 * barriers.
1822 */
1823 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
1824 "feature-flush-cache", "%d", &flush,
1825 NULL);
1826
1827 if (!err && flush)
1828 info->feature_flush = REQ_FLUSH;
1829
1830 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
1831 "feature-discard", "%d", &discard,
1832 NULL);
1833
1834 if (!err && discard)
1835 blkfront_setup_discard(info);
1836
1837 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
1838 "feature-persistent", "%u", &persistent,
1839 NULL);
1840 if (err)
1841 info->feature_persistent = 0;
1842 else
1843 info->feature_persistent = persistent;
1844
1845 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
1846 "feature-max-indirect-segments", "%u", &indirect_segments,
1847 NULL);
1848 if (err)
1849 info->max_indirect_segments = 0;
1850 else
1851 info->max_indirect_segments = min(indirect_segments,
1852 xen_blkif_max_segments);
1853
1854 return blkfront_setup_indirect(info);
1855}
1856
1857/*
1800 * Invoked when the backend is finally 'ready' (and has produced 1858 * Invoked when the backend is finally 'ready' (and has produced
1801 * the details about the physical device - #sectors, size, etc). 1859 * the details about the physical device - #sectors, size, etc).
1802 */ 1860 */
@@ -1807,7 +1865,6 @@ static void blkfront_connect(struct blkfront_info *info)
1807 unsigned int physical_sector_size; 1865 unsigned int physical_sector_size;
1808 unsigned int binfo; 1866 unsigned int binfo;
1809 int err; 1867 int err;
1810 int barrier, flush, discard, persistent;
1811 1868
1812 switch (info->connected) { 1869 switch (info->connected) {
1813 case BLKIF_STATE_CONNECTED: 1870 case BLKIF_STATE_CONNECTED:
@@ -1864,48 +1921,7 @@ static void blkfront_connect(struct blkfront_info *info)
1864 if (err != 1) 1921 if (err != 1)
1865 physical_sector_size = sector_size; 1922 physical_sector_size = sector_size;
1866 1923
1867 info->feature_flush = 0; 1924 err = blkfront_gather_backend_features(info);
1868
1869 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
1870 "feature-barrier", "%d", &barrier,
1871 NULL);
1872
1873 /*
1874 * If there's no "feature-barrier" defined, then it means
1875 * we're dealing with a very old backend which writes
1876 * synchronously; nothing to do.
1877 *
1878 * If there are barriers, then we use flush.
1879 */
1880 if (!err && barrier)
1881 info->feature_flush = REQ_FLUSH | REQ_FUA;
1882 /*
1883 * And if there is "feature-flush-cache" use that above
1884 * barriers.
1885 */
1886 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
1887 "feature-flush-cache", "%d", &flush,
1888 NULL);
1889
1890 if (!err && flush)
1891 info->feature_flush = REQ_FLUSH;
1892
1893 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
1894 "feature-discard", "%d", &discard,
1895 NULL);
1896
1897 if (!err && discard)
1898 blkfront_setup_discard(info);
1899
1900 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
1901 "feature-persistent", "%u", &persistent,
1902 NULL);
1903 if (err)
1904 info->feature_persistent = 0;
1905 else
1906 info->feature_persistent = persistent;
1907
1908 err = blkfront_setup_indirect(info);
1909 if (err) { 1925 if (err) {
1910 xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s", 1926 xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s",
1911 info->xbdev->otherend); 1927 info->xbdev->otherend);
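
Note on the xen-blkfront hunks above: all of the backend "feature-*" xenstore reads are consolidated into a single blkfront_gather_backend_features() helper, with "feature-flush-cache" taking precedence over "feature-barrier". The following is a minimal userspace sketch of just that precedence logic; the struct backend fields and the REQ_FLUSH/REQ_FUA constants are simplified stand-ins for xenbus_gather() results and the kernel's request flags, not the real driver API.

#include <stdio.h>
#include <stdbool.h>

/* Simplified stand-ins for the kernel's request flags. */
#define REQ_FLUSH (1u << 0)
#define REQ_FUA   (1u << 1)

struct backend {                /* mock of what xenstore would report */
	bool barrier;
	bool flush_cache;
};

/* Mirrors the precedence in blkfront_gather_backend_features(): an old
 * backend with neither key gets no flush support, "feature-barrier" maps
 * to FLUSH|FUA, and "feature-flush-cache" overrides that with FLUSH. */
static unsigned int gather_flush_mode(const struct backend *be)
{
	unsigned int feature_flush = 0;

	if (be->barrier)
		feature_flush = REQ_FLUSH | REQ_FUA;
	if (be->flush_cache)
		feature_flush = REQ_FLUSH;

	return feature_flush;
}

int main(void)
{
	struct backend old          = { false, false };
	struct backend barrier_only = { true,  false };
	struct backend modern       = { true,  true  };

	printf("old: %#x, barrier-only: %#x, flush-cache: %#x\n",
	       gather_flush_mode(&old),
	       gather_flush_mode(&barrier_only),
	       gather_flush_mode(&modern));
	return 0;
}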
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index fb655e8d1e3b..763301c7828c 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -496,10 +496,9 @@ static void zram_meta_free(struct zram_meta *meta, u64 disksize)
496 kfree(meta); 496 kfree(meta);
497} 497}
498 498
499static struct zram_meta *zram_meta_alloc(int device_id, u64 disksize) 499static struct zram_meta *zram_meta_alloc(char *pool_name, u64 disksize)
500{ 500{
501 size_t num_pages; 501 size_t num_pages;
502 char pool_name[8];
503 struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL); 502 struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
504 503
505 if (!meta) 504 if (!meta)
@@ -512,7 +511,6 @@ static struct zram_meta *zram_meta_alloc(int device_id, u64 disksize)
512 goto out_error; 511 goto out_error;
513 } 512 }
514 513
515 snprintf(pool_name, sizeof(pool_name), "zram%d", device_id);
516 meta->mem_pool = zs_create_pool(pool_name, GFP_NOIO | __GFP_HIGHMEM); 514 meta->mem_pool = zs_create_pool(pool_name, GFP_NOIO | __GFP_HIGHMEM);
517 if (!meta->mem_pool) { 515 if (!meta->mem_pool) {
518 pr_err("Error creating memory pool\n"); 516 pr_err("Error creating memory pool\n");
@@ -1031,7 +1029,7 @@ static ssize_t disksize_store(struct device *dev,
1031 return -EINVAL; 1029 return -EINVAL;
1032 1030
1033 disksize = PAGE_ALIGN(disksize); 1031 disksize = PAGE_ALIGN(disksize);
1034 meta = zram_meta_alloc(zram->disk->first_minor, disksize); 1032 meta = zram_meta_alloc(zram->disk->disk_name, disksize);
1035 if (!meta) 1033 if (!meta)
1036 return -ENOMEM; 1034 return -ENOMEM;
1037 1035
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index 1e1a4323a71f..9ceb8ac68fdc 100644
--- a/drivers/bluetooth/btbcm.c
+++ b/drivers/bluetooth/btbcm.c
@@ -472,12 +472,11 @@ int btbcm_setup_apple(struct hci_dev *hdev)
472 472
473 /* Read Verbose Config Version Info */ 473 /* Read Verbose Config Version Info */
474 skb = btbcm_read_verbose_config(hdev); 474 skb = btbcm_read_verbose_config(hdev);
475 if (IS_ERR(skb)) 475 if (!IS_ERR(skb)) {
476 return PTR_ERR(skb); 476 BT_INFO("%s: BCM: chip id %u build %4.4u", hdev->name, skb->data[1],
477 477 get_unaligned_le16(skb->data + 5));
478 BT_INFO("%s: BCM: chip id %u build %4.4u", hdev->name, skb->data[1], 478 kfree_skb(skb);
479 get_unaligned_le16(skb->data + 5)); 479 }
480 kfree_skb(skb);
481 480
482 set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks); 481 set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
483 482
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index da8faf78536a..5643b65cee20 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -429,7 +429,7 @@ static int hwrng_fillfn(void *unused)
429static void start_khwrngd(void) 429static void start_khwrngd(void)
430{ 430{
431 hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng"); 431 hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
432 if (hwrng_fill == ERR_PTR(-ENOMEM)) { 432 if (IS_ERR(hwrng_fill)) {
433 pr_err("hwrng_fill thread creation failed"); 433 pr_err("hwrng_fill thread creation failed");
434 hwrng_fill = NULL; 434 hwrng_fill = NULL;
435 } 435 }
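
The hw_random fix above broadens the error check on kthread_run() from the single value ERR_PTR(-ENOMEM) to IS_ERR(), so any failure (not just allocation failure) is caught. Below is a small userspace sketch of the error-pointer idiom; ERR_PTR, IS_ERR and PTR_ERR are re-implemented only for illustration, and start_worker() is a hypothetical stand-in for kthread_run().

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

/* Minimal re-implementation of the kernel's error-pointer helpers. */
#define MAX_ERRNO 4095
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

/* Hypothetical stand-in for kthread_run(): it may fail for reasons other
 * than -ENOMEM (e.g. -EINTR), which is why the caller must use IS_ERR()
 * rather than comparing against one specific error pointer. */
static void *start_worker(int simulate_err)
{
	static int dummy_task;

	if (simulate_err)
		return ERR_PTR(simulate_err);
	return &dummy_task;
}

int main(void)
{
	void *task = start_worker(-EINTR);

	if (IS_ERR(task)) {     /* catches every error, not just -ENOMEM */
		printf("worker creation failed: %ld\n", PTR_ERR(task));
		task = NULL;
	}
	return 0;
}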
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index 283f00a7f036..1082d4bb016a 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -129,8 +129,9 @@ struct tpm_chip *tpmm_chip_alloc(struct device *dev,
129 129
130 device_initialize(&chip->dev); 130 device_initialize(&chip->dev);
131 131
132 chip->cdev.owner = chip->pdev->driver->owner;
133 cdev_init(&chip->cdev, &tpm_fops); 132 cdev_init(&chip->cdev, &tpm_fops);
133 chip->cdev.owner = chip->pdev->driver->owner;
134 chip->cdev.kobj.parent = &chip->dev.kobj;
134 135
135 return chip; 136 return chip;
136} 137}
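
The tpm-chip hunk above reorders the character-device setup so that cdev.owner (and the new kobj.parent assignment) are made after cdev_init(). The reason is that cdev_init() (re)initializes the whole struct cdev before installing the fops, so anything written into it earlier is lost. A schematic userspace illustration follows; struct cdev, cdev_init() and the field names here are simplified mock-ups used only to demonstrate the ordering hazard.

#include <stdio.h>
#include <string.h>

struct fops { int dummy; };

/* Mock of struct cdev and cdev_init(): like the real helper, it wipes
 * the structure before storing the fops pointer. */
struct cdev {
	const struct fops *ops;
	const char *owner;
};

static void cdev_init(struct cdev *cdev, const struct fops *ops)
{
	memset(cdev, 0, sizeof(*cdev)); /* erases any earlier assignments */
	cdev->ops = ops;
}

int main(void)
{
	static const struct fops tpm_fops;
	struct cdev cdev;

	/* Wrong order: the owner set here is erased by cdev_init(). */
	cdev.owner = "tpm";
	cdev_init(&cdev, &tpm_fops);
	printf("wrong order, owner = %s\n", cdev.owner ? cdev.owner : "(null)");

	/* Fixed order, as in the patch: initialize first, then set fields. */
	cdev_init(&cdev, &tpm_fops);
	cdev.owner = "tpm";
	printf("fixed order, owner = %s\n", cdev.owner);
	return 0;
}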
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
index 44f9d20c19ac..1267322595da 100644
--- a/drivers/char/tpm/tpm_crb.c
+++ b/drivers/char/tpm/tpm_crb.c
@@ -233,6 +233,14 @@ static int crb_acpi_add(struct acpi_device *device)
233 return -ENODEV; 233 return -ENODEV;
234 } 234 }
235 235
236 /* At least some versions of AMI BIOS have a bug that TPM2 table has
237 * zero address for the control area and therefore we must fail.
238 */
239 if (!buf->control_area_pa) {
240 dev_err(dev, "TPM2 ACPI table has a zero address for the control area\n");
241 return -EINVAL;
242 }
243
236 if (buf->hdr.length < sizeof(struct acpi_tpm2)) { 244 if (buf->hdr.length < sizeof(struct acpi_tpm2)) {
237 dev_err(dev, "TPM2 ACPI table has wrong size"); 245 dev_err(dev, "TPM2 ACPI table has wrong size");
238 return -EINVAL; 246 return -EINVAL;
diff --git a/drivers/clk/pxa/clk-pxa3xx.c b/drivers/clk/pxa/clk-pxa3xx.c
index 4b93a1efb36d..ac03ba49e9d1 100644
--- a/drivers/clk/pxa/clk-pxa3xx.c
+++ b/drivers/clk/pxa/clk-pxa3xx.c
@@ -126,7 +126,7 @@ PARENTS(pxa3xx_ac97_bus) = { "ring_osc_60mhz", "ac97" };
126PARENTS(pxa3xx_sbus) = { "ring_osc_60mhz", "system_bus" }; 126PARENTS(pxa3xx_sbus) = { "ring_osc_60mhz", "system_bus" };
127PARENTS(pxa3xx_smemcbus) = { "ring_osc_60mhz", "smemc" }; 127PARENTS(pxa3xx_smemcbus) = { "ring_osc_60mhz", "smemc" };
128 128
129#define CKEN_AB(bit) ((CKEN_ ## bit > 31) ? &CKENA : &CKENB) 129#define CKEN_AB(bit) ((CKEN_ ## bit > 31) ? &CKENB : &CKENA)
130#define PXA3XX_CKEN(dev_id, con_id, parents, mult_lp, div_lp, mult_hp, \ 130#define PXA3XX_CKEN(dev_id, con_id, parents, mult_lp, div_lp, mult_hp, \
131 div_hp, bit, is_lp, flags) \ 131 div_hp, bit, is_lp, flags) \
132 PXA_CKEN(dev_id, con_id, bit, parents, mult_lp, div_lp, \ 132 PXA_CKEN(dev_id, con_id, bit, parents, mult_lp, div_lp, \
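
The one-line pxa3xx clock fix above swaps the two operands of CKEN_AB(): clock-enable bits 0-31 live in CKENA and bits 32 and up live in CKENB, so the macro previously picked the wrong bank. A tiny sketch of the intended bit-to-bank mapping, with plain variables standing in for the memory-mapped CKENA/CKENB registers and an illustrative enable helper:

#include <stdio.h>
#include <stdint.h>

/* Stand-ins for the memory-mapped CKENA/CKENB registers. */
static uint32_t CKENA, CKENB;

/* Bits 0..31 belong to CKENA, bits 32+ to CKENB (the corrected mapping). */
static uint32_t *cken_reg(unsigned int bit)
{
	return (bit > 31) ? &CKENB : &CKENA;
}

static void cken_enable(unsigned int bit)
{
	*cken_reg(bit) |= 1u << (bit & 31);
}

int main(void)
{
	cken_enable(3);         /* lands in CKENA */
	cken_enable(35);        /* lands in CKENB */
	printf("CKENA=%#x CKENB=%#x\n", (unsigned)CKENA, (unsigned)CKENB);
	return 0;
}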
diff --git a/drivers/clk/spear/clk-aux-synth.c b/drivers/clk/spear/clk-aux-synth.c
index bdfb4421c643..f271c350ef94 100644
--- a/drivers/clk/spear/clk-aux-synth.c
+++ b/drivers/clk/spear/clk-aux-synth.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) 2012 ST Microelectronics 2 * Copyright (C) 2012 ST Microelectronics
3 * Viresh Kumar <viresh.linux@gmail.com> 3 * Viresh Kumar <vireshk@kernel.org>
4 * 4 *
5 * This file is licensed under the terms of the GNU General Public 5 * This file is licensed under the terms of the GNU General Public
6 * License version 2. This program is licensed "as is" without any 6 * License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/clk-frac-synth.c b/drivers/clk/spear/clk-frac-synth.c
index dffd4ce6c8b5..58d678b5b40a 100644
--- a/drivers/clk/spear/clk-frac-synth.c
+++ b/drivers/clk/spear/clk-frac-synth.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) 2012 ST Microelectronics 2 * Copyright (C) 2012 ST Microelectronics
3 * Viresh Kumar <viresh.linux@gmail.com> 3 * Viresh Kumar <vireshk@kernel.org>
4 * 4 *
5 * This file is licensed under the terms of the GNU General Public 5 * This file is licensed under the terms of the GNU General Public
6 * License version 2. This program is licensed "as is" without any 6 * License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/clk-gpt-synth.c b/drivers/clk/spear/clk-gpt-synth.c
index 1afc18c4effc..1a722e99e76e 100644
--- a/drivers/clk/spear/clk-gpt-synth.c
+++ b/drivers/clk/spear/clk-gpt-synth.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) 2012 ST Microelectronics 2 * Copyright (C) 2012 ST Microelectronics
3 * Viresh Kumar <viresh.linux@gmail.com> 3 * Viresh Kumar <vireshk@kernel.org>
4 * 4 *
5 * This file is licensed under the terms of the GNU General Public 5 * This file is licensed under the terms of the GNU General Public
6 * License version 2. This program is licensed "as is" without any 6 * License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/clk-vco-pll.c b/drivers/clk/spear/clk-vco-pll.c
index 1b9b65bca51e..5ebddc528145 100644
--- a/drivers/clk/spear/clk-vco-pll.c
+++ b/drivers/clk/spear/clk-vco-pll.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) 2012 ST Microelectronics 2 * Copyright (C) 2012 ST Microelectronics
3 * Viresh Kumar <viresh.linux@gmail.com> 3 * Viresh Kumar <vireshk@kernel.org>
4 * 4 *
5 * This file is licensed under the terms of the GNU General Public 5 * This file is licensed under the terms of the GNU General Public
6 * License version 2. This program is licensed "as is" without any 6 * License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/clk.c b/drivers/clk/spear/clk.c
index 628b6d5ed3d9..157fe099ea6a 100644
--- a/drivers/clk/spear/clk.c
+++ b/drivers/clk/spear/clk.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) 2012 ST Microelectronics 2 * Copyright (C) 2012 ST Microelectronics
3 * Viresh Kumar <viresh.linux@gmail.com> 3 * Viresh Kumar <vireshk@kernel.org>
4 * 4 *
5 * This file is licensed under the terms of the GNU General Public 5 * This file is licensed under the terms of the GNU General Public
6 * License version 2. This program is licensed "as is" without any 6 * License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/clk.h b/drivers/clk/spear/clk.h
index 931737677dfa..9834944f08b1 100644
--- a/drivers/clk/spear/clk.h
+++ b/drivers/clk/spear/clk.h
@@ -2,7 +2,7 @@
2 * Clock framework definitions for SPEAr platform 2 * Clock framework definitions for SPEAr platform
3 * 3 *
4 * Copyright (C) 2012 ST Microelectronics 4 * Copyright (C) 2012 ST Microelectronics
5 * Viresh Kumar <viresh.linux@gmail.com> 5 * Viresh Kumar <vireshk@kernel.org>
6 * 6 *
7 * This file is licensed under the terms of the GNU General Public 7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any 8 * License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/spear1310_clock.c b/drivers/clk/spear/spear1310_clock.c
index 4daa5977793a..222ce108b41a 100644
--- a/drivers/clk/spear/spear1310_clock.c
+++ b/drivers/clk/spear/spear1310_clock.c
@@ -4,7 +4,7 @@
4 * SPEAr1310 machine clock framework source file 4 * SPEAr1310 machine clock framework source file
5 * 5 *
6 * Copyright (C) 2012 ST Microelectronics 6 * Copyright (C) 2012 ST Microelectronics
7 * Viresh Kumar <viresh.linux@gmail.com> 7 * Viresh Kumar <vireshk@kernel.org>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/spear1340_clock.c b/drivers/clk/spear/spear1340_clock.c
index 5a5c6648308d..973c9d3fbcf8 100644
--- a/drivers/clk/spear/spear1340_clock.c
+++ b/drivers/clk/spear/spear1340_clock.c
@@ -4,7 +4,7 @@
4 * SPEAr1340 machine clock framework source file 4 * SPEAr1340 machine clock framework source file
5 * 5 *
6 * Copyright (C) 2012 ST Microelectronics 6 * Copyright (C) 2012 ST Microelectronics
7 * Viresh Kumar <viresh.linux@gmail.com> 7 * Viresh Kumar <vireshk@kernel.org>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/spear3xx_clock.c b/drivers/clk/spear/spear3xx_clock.c
index bb5f387774e2..404a55edd613 100644
--- a/drivers/clk/spear/spear3xx_clock.c
+++ b/drivers/clk/spear/spear3xx_clock.c
@@ -2,7 +2,7 @@
2 * SPEAr3xx machines clock framework source file 2 * SPEAr3xx machines clock framework source file
3 * 3 *
4 * Copyright (C) 2012 ST Microelectronics 4 * Copyright (C) 2012 ST Microelectronics
5 * Viresh Kumar <viresh.linux@gmail.com> 5 * Viresh Kumar <vireshk@kernel.org>
6 * 6 *
7 * This file is licensed under the terms of the GNU General Public 7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any 8 * License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/spear6xx_clock.c b/drivers/clk/spear/spear6xx_clock.c
index 4f649c9cb094..231061fa73a4 100644
--- a/drivers/clk/spear/spear6xx_clock.c
+++ b/drivers/clk/spear/spear6xx_clock.c
@@ -2,7 +2,7 @@
2 * SPEAr6xx machines clock framework source file 2 * SPEAr6xx machines clock framework source file
3 * 3 *
4 * Copyright (C) 2012 ST Microelectronics 4 * Copyright (C) 2012 ST Microelectronics
5 * Viresh Kumar <viresh.linux@gmail.com> 5 * Viresh Kumar <vireshk@kernel.org>
6 * 6 *
7 * This file is licensed under the terms of the GNU General Public 7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any 8 * License version 2. This program is licensed "as is" without any
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index b8ff3c64cc45..c96de14036a0 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -661,6 +661,9 @@ static void sh_cmt_clocksource_suspend(struct clocksource *cs)
661{ 661{
662 struct sh_cmt_channel *ch = cs_to_sh_cmt(cs); 662 struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
663 663
664 if (!ch->cs_enabled)
665 return;
666
664 sh_cmt_stop(ch, FLAG_CLOCKSOURCE); 667 sh_cmt_stop(ch, FLAG_CLOCKSOURCE);
665 pm_genpd_syscore_poweroff(&ch->cmt->pdev->dev); 668 pm_genpd_syscore_poweroff(&ch->cmt->pdev->dev);
666} 669}
@@ -669,6 +672,9 @@ static void sh_cmt_clocksource_resume(struct clocksource *cs)
669{ 672{
670 struct sh_cmt_channel *ch = cs_to_sh_cmt(cs); 673 struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
671 674
675 if (!ch->cs_enabled)
676 return;
677
672 pm_genpd_syscore_poweron(&ch->cmt->pdev->dev); 678 pm_genpd_syscore_poweron(&ch->cmt->pdev->dev);
673 sh_cmt_start(ch, FLAG_CLOCKSOURCE); 679 sh_cmt_start(ch, FLAG_CLOCKSOURCE);
674} 680}
diff --git a/drivers/clocksource/timer-imx-gpt.c b/drivers/clocksource/timer-imx-gpt.c
index 2d59038dec43..86c7eb66bdfb 100644
--- a/drivers/clocksource/timer-imx-gpt.c
+++ b/drivers/clocksource/timer-imx-gpt.c
@@ -462,6 +462,7 @@ void __init mxc_timer_init(unsigned long pbase, int irq, enum imx_gpt_type type)
462 BUG_ON(!imxtm->base); 462 BUG_ON(!imxtm->base);
463 463
464 imxtm->type = type; 464 imxtm->type = type;
465 imxtm->irq = irq;
465 466
466 _mxc_timer_init(imxtm); 467 _mxc_timer_init(imxtm);
467} 468}
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index b612411655f9..7a3c30c4336f 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -169,6 +169,15 @@ struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
169} 169}
170EXPORT_SYMBOL_GPL(get_governor_parent_kobj); 170EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
171 171
172struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
173{
174 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
175
176 return policy && !policy_is_inactive(policy) ?
177 policy->freq_table : NULL;
178}
179EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table);
180
172static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall) 181static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
173{ 182{
174 u64 idle_time; 183 u64 idle_time;
@@ -993,7 +1002,7 @@ static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
993 int ret = 0; 1002 int ret = 0;
994 1003
995 /* Some related CPUs might not be present (physically hotplugged) */ 1004 /* Some related CPUs might not be present (physically hotplugged) */
996 for_each_cpu_and(j, policy->related_cpus, cpu_present_mask) { 1005 for_each_cpu(j, policy->real_cpus) {
997 if (j == policy->kobj_cpu) 1006 if (j == policy->kobj_cpu)
998 continue; 1007 continue;
999 1008
@@ -1010,7 +1019,7 @@ static void cpufreq_remove_dev_symlink(struct cpufreq_policy *policy)
1010 unsigned int j; 1019 unsigned int j;
1011 1020
1012 /* Some related CPUs might not be present (physically hotplugged) */ 1021 /* Some related CPUs might not be present (physically hotplugged) */
1013 for_each_cpu_and(j, policy->related_cpus, cpu_present_mask) { 1022 for_each_cpu(j, policy->real_cpus) {
1014 if (j == policy->kobj_cpu) 1023 if (j == policy->kobj_cpu)
1015 continue; 1024 continue;
1016 1025
@@ -1132,6 +1141,7 @@ static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
1132 1141
1133 down_write(&policy->rwsem); 1142 down_write(&policy->rwsem);
1134 policy->cpu = cpu; 1143 policy->cpu = cpu;
1144 policy->governor = NULL;
1135 up_write(&policy->rwsem); 1145 up_write(&policy->rwsem);
1136 } 1146 }
1137 1147
@@ -1153,11 +1163,14 @@ static struct cpufreq_policy *cpufreq_policy_alloc(struct device *dev)
1153 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) 1163 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
1154 goto err_free_cpumask; 1164 goto err_free_cpumask;
1155 1165
1166 if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
1167 goto err_free_rcpumask;
1168
1156 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &dev->kobj, 1169 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &dev->kobj,
1157 "cpufreq"); 1170 "cpufreq");
1158 if (ret) { 1171 if (ret) {
1159 pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret); 1172 pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
1160 goto err_free_rcpumask; 1173 goto err_free_real_cpus;
1161 } 1174 }
1162 1175
1163 INIT_LIST_HEAD(&policy->policy_list); 1176 INIT_LIST_HEAD(&policy->policy_list);
@@ -1174,6 +1187,8 @@ static struct cpufreq_policy *cpufreq_policy_alloc(struct device *dev)
1174 1187
1175 return policy; 1188 return policy;
1176 1189
1190err_free_real_cpus:
1191 free_cpumask_var(policy->real_cpus);
1177err_free_rcpumask: 1192err_free_rcpumask:
1178 free_cpumask_var(policy->related_cpus); 1193 free_cpumask_var(policy->related_cpus);
1179err_free_cpumask: 1194err_free_cpumask:
@@ -1224,6 +1239,7 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy, bool notify)
1224 write_unlock_irqrestore(&cpufreq_driver_lock, flags); 1239 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1225 1240
1226 cpufreq_policy_put_kobj(policy, notify); 1241 cpufreq_policy_put_kobj(policy, notify);
1242 free_cpumask_var(policy->real_cpus);
1227 free_cpumask_var(policy->related_cpus); 1243 free_cpumask_var(policy->related_cpus);
1228 free_cpumask_var(policy->cpus); 1244 free_cpumask_var(policy->cpus);
1229 kfree(policy); 1245 kfree(policy);
@@ -1248,14 +1264,17 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1248 1264
1249 pr_debug("adding CPU %u\n", cpu); 1265 pr_debug("adding CPU %u\n", cpu);
1250 1266
1251 /* 1267 if (cpu_is_offline(cpu)) {
1252 * Only possible if 'cpu' wasn't physically present earlier and we are 1268 /*
1253 * here from subsys_interface add callback. A hotplug notifier will 1269 * Only possible if we are here from the subsys_interface add
1254 * follow and we will handle it like logical CPU hotplug then. For now, 1270 * callback. A hotplug notifier will follow and we will handle
1255 * just create the sysfs link. 1271 * it as CPU online then. For now, just create the sysfs link,
1256 */ 1272 * unless there is no policy or the link is already present.
1257 if (cpu_is_offline(cpu)) 1273 */
1258 return add_cpu_dev_symlink(per_cpu(cpufreq_cpu_data, cpu), cpu); 1274 policy = per_cpu(cpufreq_cpu_data, cpu);
1275 return policy && !cpumask_test_and_set_cpu(cpu, policy->real_cpus)
1276 ? add_cpu_dev_symlink(policy, cpu) : 0;
1277 }
1259 1278
1260 if (!down_read_trylock(&cpufreq_rwsem)) 1279 if (!down_read_trylock(&cpufreq_rwsem))
1261 return 0; 1280 return 0;
@@ -1297,6 +1316,10 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1297 /* related cpus should atleast have policy->cpus */ 1316 /* related cpus should atleast have policy->cpus */
1298 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus); 1317 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
1299 1318
1319 /* Remember which CPUs have been present at the policy creation time. */
1320 if (!recover_policy)
1321 cpumask_and(policy->real_cpus, policy->cpus, cpu_present_mask);
1322
1300 /* 1323 /*
1301 * affected cpus must always be the one, which are online. We aren't 1324 * affected cpus must always be the one, which are online. We aren't
1302 * managing offline cpus here. 1325 * managing offline cpus here.
@@ -1410,8 +1433,7 @@ nomem_out:
1410 return ret; 1433 return ret;
1411} 1434}
1412 1435
1413static int __cpufreq_remove_dev_prepare(struct device *dev, 1436static int __cpufreq_remove_dev_prepare(struct device *dev)
1414 struct subsys_interface *sif)
1415{ 1437{
1416 unsigned int cpu = dev->id; 1438 unsigned int cpu = dev->id;
1417 int ret = 0; 1439 int ret = 0;
@@ -1427,10 +1449,8 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
1427 1449
1428 if (has_target()) { 1450 if (has_target()) {
1429 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP); 1451 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1430 if (ret) { 1452 if (ret)
1431 pr_err("%s: Failed to stop governor\n", __func__); 1453 pr_err("%s: Failed to stop governor\n", __func__);
1432 return ret;
1433 }
1434 } 1454 }
1435 1455
1436 down_write(&policy->rwsem); 1456 down_write(&policy->rwsem);
@@ -1463,8 +1483,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
1463 return ret; 1483 return ret;
1464} 1484}
1465 1485
1466static int __cpufreq_remove_dev_finish(struct device *dev, 1486static int __cpufreq_remove_dev_finish(struct device *dev)
1467 struct subsys_interface *sif)
1468{ 1487{
1469 unsigned int cpu = dev->id; 1488 unsigned int cpu = dev->id;
1470 int ret; 1489 int ret;
@@ -1482,10 +1501,8 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
1482 /* If cpu is last user of policy, free policy */ 1501 /* If cpu is last user of policy, free policy */
1483 if (has_target()) { 1502 if (has_target()) {
1484 ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT); 1503 ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
1485 if (ret) { 1504 if (ret)
1486 pr_err("%s: Failed to exit governor\n", __func__); 1505 pr_err("%s: Failed to exit governor\n", __func__);
1487 return ret;
1488 }
1489 } 1506 }
1490 1507
1491 /* 1508 /*
@@ -1496,10 +1513,6 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
1496 if (cpufreq_driver->exit) 1513 if (cpufreq_driver->exit)
1497 cpufreq_driver->exit(policy); 1514 cpufreq_driver->exit(policy);
1498 1515
1499 /* Free the policy only if the driver is getting removed. */
1500 if (sif)
1501 cpufreq_policy_free(policy, true);
1502
1503 return 0; 1516 return 0;
1504} 1517}
1505 1518
@@ -1511,42 +1524,41 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
1511static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif) 1524static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1512{ 1525{
1513 unsigned int cpu = dev->id; 1526 unsigned int cpu = dev->id;
1514 int ret; 1527 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1515
1516 /*
1517 * Only possible if 'cpu' is getting physically removed now. A hotplug
1518 * notifier should have already been called and we just need to remove
1519 * link or free policy here.
1520 */
1521 if (cpu_is_offline(cpu)) {
1522 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1523 struct cpumask mask;
1524 1528
1525 if (!policy) 1529 if (!policy)
1526 return 0; 1530 return 0;
1527 1531
1528 cpumask_copy(&mask, policy->related_cpus); 1532 if (cpu_online(cpu)) {
1529 cpumask_clear_cpu(cpu, &mask); 1533 __cpufreq_remove_dev_prepare(dev);
1534 __cpufreq_remove_dev_finish(dev);
1535 }
1530 1536
1531 /* 1537 cpumask_clear_cpu(cpu, policy->real_cpus);
1532 * Free policy only if all policy->related_cpus are removed
1533 * physically.
1534 */
1535 if (cpumask_intersects(&mask, cpu_present_mask)) {
1536 remove_cpu_dev_symlink(policy, cpu);
1537 return 0;
1538 }
1539 1538
1539 if (cpumask_empty(policy->real_cpus)) {
1540 cpufreq_policy_free(policy, true); 1540 cpufreq_policy_free(policy, true);
1541 return 0; 1541 return 0;
1542 } 1542 }
1543 1543
1544 ret = __cpufreq_remove_dev_prepare(dev, sif); 1544 if (cpu != policy->kobj_cpu) {
1545 remove_cpu_dev_symlink(policy, cpu);
1546 } else {
1547 /*
1548 * The CPU owning the policy object is going away. Move it to
1549 * another suitable CPU.
1550 */
1551 unsigned int new_cpu = cpumask_first(policy->real_cpus);
1552 struct device *new_dev = get_cpu_device(new_cpu);
1545 1553
1546 if (!ret) 1554 dev_dbg(dev, "%s: Moving policy object to CPU%u\n", __func__, new_cpu);
1547 ret = __cpufreq_remove_dev_finish(dev, sif);
1548 1555
1549 return ret; 1556 sysfs_remove_link(&new_dev->kobj, "cpufreq");
1557 policy->kobj_cpu = new_cpu;
1558 WARN_ON(kobject_move(&policy->kobj, &new_dev->kobj));
1559 }
1560
1561 return 0;
1550} 1562}
1551 1563
1552static void handle_update(struct work_struct *work) 1564static void handle_update(struct work_struct *work)
@@ -2385,11 +2397,11 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
2385 break; 2397 break;
2386 2398
2387 case CPU_DOWN_PREPARE: 2399 case CPU_DOWN_PREPARE:
2388 __cpufreq_remove_dev_prepare(dev, NULL); 2400 __cpufreq_remove_dev_prepare(dev);
2389 break; 2401 break;
2390 2402
2391 case CPU_POST_DEAD: 2403 case CPU_POST_DEAD:
2392 __cpufreq_remove_dev_finish(dev, NULL); 2404 __cpufreq_remove_dev_finish(dev);
2393 break; 2405 break;
2394 2406
2395 case CPU_DOWN_FAILED: 2407 case CPU_DOWN_FAILED:
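
The cpufreq changes above introduce a real_cpus mask that records which CPUs of a policy were physically present when the policy was created; on device removal the CPU is cleared from that mask, the policy is freed only once the mask becomes empty, and if the departing CPU owned the sysfs kobject the ownership moves to another CPU still in the mask. The sketch below models just that lifecycle with an ordinary bitmask and printf in place of the sysfs/kobject calls; the policy struct, remove_cpu() and the helpers are simplified inventions for illustration, not the kernel structures.

#include <stdio.h>
#include <stdbool.h>

struct policy {
	unsigned long real_cpus;        /* bit i set => CPU i still present */
	int kobj_cpu;                   /* CPU currently owning the kobject */
};

static int first_cpu(unsigned long mask)
{
	for (int i = 0; i < (int)(8 * sizeof(mask)); i++)
		if (mask & (1ul << i))
			return i;
	return -1;
}

/* Mirrors the shape of cpufreq_remove_dev() after the patch. */
static bool remove_cpu(struct policy *p, int cpu)
{
	p->real_cpus &= ~(1ul << cpu);

	if (!p->real_cpus) {
		printf("CPU%d was the last one: free the policy\n", cpu);
		return true;            /* caller frees the policy */
	}

	if (cpu != p->kobj_cpu) {
		printf("CPU%d: just drop its sysfs symlink\n", cpu);
	} else {
		p->kobj_cpu = first_cpu(p->real_cpus);
		printf("CPU%d owned the kobject: move it to CPU%d\n",
		       cpu, p->kobj_cpu);
	}
	return false;
}

int main(void)
{
	struct policy p = { .real_cpus = 0xful, .kobj_cpu = 0 };

	remove_cpu(&p, 2);      /* plain symlink removal */
	remove_cpu(&p, 0);      /* kobject owner leaves: ownership moves */
	remove_cpu(&p, 1);
	remove_cpu(&p, 3);      /* last present CPU: policy freed */
	return 0;
}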
diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c
index ae5b2bd3a978..fa3dd840a837 100644
--- a/drivers/cpufreq/exynos-cpufreq.c
+++ b/drivers/cpufreq/exynos-cpufreq.c
@@ -180,7 +180,7 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
180 ret = exynos5250_cpufreq_init(exynos_info); 180 ret = exynos5250_cpufreq_init(exynos_info);
181 } else { 181 } else {
182 pr_err("%s: Unknown SoC type\n", __func__); 182 pr_err("%s: Unknown SoC type\n", __func__);
183 return -ENODEV; 183 ret = -ENODEV;
184 } 184 }
185 185
186 if (ret) 186 if (ret)
@@ -188,12 +188,14 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
188 188
189 if (exynos_info->set_freq == NULL) { 189 if (exynos_info->set_freq == NULL) {
190 dev_err(&pdev->dev, "No set_freq function (ERR)\n"); 190 dev_err(&pdev->dev, "No set_freq function (ERR)\n");
191 ret = -EINVAL;
191 goto err_vdd_arm; 192 goto err_vdd_arm;
192 } 193 }
193 194
194 arm_regulator = regulator_get(NULL, "vdd_arm"); 195 arm_regulator = regulator_get(NULL, "vdd_arm");
195 if (IS_ERR(arm_regulator)) { 196 if (IS_ERR(arm_regulator)) {
196 dev_err(&pdev->dev, "failed to get resource vdd_arm\n"); 197 dev_err(&pdev->dev, "failed to get resource vdd_arm\n");
198 ret = -EINVAL;
197 goto err_vdd_arm; 199 goto err_vdd_arm;
198 } 200 }
199 201
@@ -225,7 +227,7 @@ err_cpufreq_reg:
225 regulator_put(arm_regulator); 227 regulator_put(arm_regulator);
226err_vdd_arm: 228err_vdd_arm:
227 kfree(exynos_info); 229 kfree(exynos_info);
228 return -EINVAL; 230 return ret;
229} 231}
230 232
231static struct platform_driver exynos_cpufreq_platdrv = { 233static struct platform_driver exynos_cpufreq_platdrv = {
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index df14766a8e06..dfbbf981ed56 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -297,15 +297,6 @@ int cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
297} 297}
298EXPORT_SYMBOL_GPL(cpufreq_table_validate_and_show); 298EXPORT_SYMBOL_GPL(cpufreq_table_validate_and_show);
299 299
300struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu);
301
302struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
303{
304 struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
305 return policy ? policy->freq_table : NULL;
306}
307EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table);
308
309MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>"); 300MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");
310MODULE_DESCRIPTION("CPUfreq frequency table helpers"); 301MODULE_DESCRIPTION("CPUfreq frequency table helpers");
311MODULE_LICENSE("GPL"); 302MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 15ada47bb720..fcb929ec5304 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -681,6 +681,7 @@ static struct cpu_defaults knl_params = {
681 .get_max = core_get_max_pstate, 681 .get_max = core_get_max_pstate,
682 .get_min = core_get_min_pstate, 682 .get_min = core_get_min_pstate,
683 .get_turbo = knl_get_turbo_pstate, 683 .get_turbo = knl_get_turbo_pstate,
684 .get_scaling = core_get_scaling,
684 .set = core_set_pstate, 685 .set = core_set_pstate,
685 }, 686 },
686}; 687};
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
index e362860c2b50..cd593c1f66dc 100644
--- a/drivers/cpufreq/loongson2_cpufreq.c
+++ b/drivers/cpufreq/loongson2_cpufreq.c
@@ -20,7 +20,7 @@
20#include <asm/clock.h> 20#include <asm/clock.h>
21#include <asm/idle.h> 21#include <asm/idle.h>
22 22
23#include <asm/mach-loongson/loongson.h> 23#include <asm/mach-loongson64/loongson.h>
24 24
25static uint nowait; 25static uint nowait;
26 26
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index e8e2775c3821..48b7228563ad 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -112,7 +112,12 @@ int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
112static void enter_freeze_proper(struct cpuidle_driver *drv, 112static void enter_freeze_proper(struct cpuidle_driver *drv,
113 struct cpuidle_device *dev, int index) 113 struct cpuidle_device *dev, int index)
114{ 114{
115 tick_freeze(); 115 /*
116 * trace_suspend_resume() called by tick_freeze() for the last CPU
117 * executing it contains RCU usage regarded as invalid in the idle
118 * context, so tell RCU about that.
119 */
120 RCU_NONIDLE(tick_freeze());
116 /* 121 /*
117 * The state used here cannot be a "coupled" one, because the "coupled" 122 * The state used here cannot be a "coupled" one, because the "coupled"
118 * cpuidle mechanism enables interrupts and doing that with timekeeping 123 * cpuidle mechanism enables interrupts and doing that with timekeeping
@@ -122,7 +127,7 @@ static void enter_freeze_proper(struct cpuidle_driver *drv,
122 WARN_ON(!irqs_disabled()); 127 WARN_ON(!irqs_disabled());
123 /* 128 /*
124 * timekeeping_resume() that will be called by tick_unfreeze() for the 129 * timekeeping_resume() that will be called by tick_unfreeze() for the
125 * last CPU executing it calls functions containing RCU read-side 130 * first CPU executing it calls functions containing RCU read-side
126 * critical sections, so tell RCU about that. 131 * critical sections, so tell RCU about that.
127 */ 132 */
128 RCU_NONIDLE(tick_unfreeze()); 133 RCU_NONIDLE(tick_unfreeze());
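
The cpuidle hunk wraps tick_freeze() in RCU_NONIDLE() because, as the new comment explains, the suspend/resume tracepoint it can emit uses RCU, which is not legal while the CPU is marked idle; RCU_NONIDLE() temporarily takes the CPU out of RCU's idle state around the call. A schematic userspace rendering of that bracketing pattern is below; the rcu_idle_exit()/rcu_idle_enter() functions are mocks that only toggle a flag, shown purely to convey the shape of the macro rather than the real RCU implementation.

#include <stdio.h>
#include <stdbool.h>
#include <assert.h>

static bool cpu_rcu_idle = true;   /* pretend the CPU sits in the idle loop */

/* Mocks: the real helpers tell RCU the CPU left/entered the idle state. */
static void rcu_idle_exit(void)  { cpu_rcu_idle = false; }
static void rcu_idle_enter(void) { cpu_rcu_idle = true; }

/* Bracketing in the spirit of RCU_NONIDLE(stmt):
 * leave the RCU-idle state, run the statement, go back to RCU-idle. */
#define RCU_NONIDLE(stmt)        \
	do {                     \
		rcu_idle_exit(); \
		stmt;            \
		rcu_idle_enter();\
	} while (0)

/* Stand-in for tick_freeze(): internally it may trace, which needs RCU. */
static void tick_freeze(void)
{
	assert(!cpu_rcu_idle && "RCU used from the idle state");
	printf("tick frozen while RCU is watching this CPU\n");
}

int main(void)
{
	/* Calling tick_freeze() directly here would trip the assertion;
	 * the wrapper makes RCU watch the CPU for the duration of the call. */
	RCU_NONIDLE(tick_freeze());
	return 0;
}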
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index dae1e8099969..f9c78751989e 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -909,13 +909,14 @@ static int ahash_final_ctx(struct ahash_request *req)
909 state->buflen_1; 909 state->buflen_1;
910 u32 *sh_desc = ctx->sh_desc_fin, *desc; 910 u32 *sh_desc = ctx->sh_desc_fin, *desc;
911 dma_addr_t ptr = ctx->sh_desc_fin_dma; 911 dma_addr_t ptr = ctx->sh_desc_fin_dma;
912 int sec4_sg_bytes; 912 int sec4_sg_bytes, sec4_sg_src_index;
913 int digestsize = crypto_ahash_digestsize(ahash); 913 int digestsize = crypto_ahash_digestsize(ahash);
914 struct ahash_edesc *edesc; 914 struct ahash_edesc *edesc;
915 int ret = 0; 915 int ret = 0;
916 int sh_len; 916 int sh_len;
917 917
918 sec4_sg_bytes = (1 + (buflen ? 1 : 0)) * sizeof(struct sec4_sg_entry); 918 sec4_sg_src_index = 1 + (buflen ? 1 : 0);
919 sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);
919 920
920 /* allocate space for base edesc and hw desc commands, link tables */ 921 /* allocate space for base edesc and hw desc commands, link tables */
921 edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN + 922 edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
@@ -942,7 +943,7 @@ static int ahash_final_ctx(struct ahash_request *req)
942 state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, 943 state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
943 buf, state->buf_dma, buflen, 944 buf, state->buf_dma, buflen,
944 last_buflen); 945 last_buflen);
945 (edesc->sec4_sg + sec4_sg_bytes - 1)->len |= SEC4_SG_LEN_FIN; 946 (edesc->sec4_sg + sec4_sg_src_index - 1)->len |= SEC4_SG_LEN_FIN;
946 947
947 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, 948 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
948 sec4_sg_bytes, DMA_TO_DEVICE); 949 sec4_sg_bytes, DMA_TO_DEVICE);
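
In the caamhash fix above, the SEC4_SG_LEN_FIN marker was being applied at offset sec4_sg_bytes - 1, i.e. a byte count was used where an entry index was needed; the patch introduces sec4_sg_src_index so the last scatter-gather entry is addressed by index while the byte count is kept for the DMA mapping. A small sketch of that distinction, with a toy entry type standing in for struct sec4_sg_entry:

#include <stdio.h>
#include <stdint.h>

#define LEN_FIN (1u << 30)      /* toy "final entry" flag */

struct sg_entry {               /* stand-in for struct sec4_sg_entry */
	uint32_t len;
};

int main(void)
{
	struct sg_entry table[4] = { { 16 }, { 32 } };
	int src_index = 2;                              /* entries in use */
	size_t sg_bytes = src_index * sizeof(table[0]); /* bytes, for DMA mapping */

	/* Correct: address the last used entry by its index.
	 * (table + sg_bytes - 1) would point far past the used entries. */
	(table + src_index - 1)->len |= LEN_FIN;

	printf("mapped %zu bytes, final flag on entry %d (len=%#x)\n",
	       sg_bytes, src_index - 1, (unsigned)table[src_index - 1].len);
	return 0;
}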
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 7ba495f75370..402631a19a11 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -905,7 +905,6 @@ static int ablk_perform(struct ablkcipher_request *req, int encrypt)
905 crypt->mode |= NPE_OP_NOT_IN_PLACE; 905 crypt->mode |= NPE_OP_NOT_IN_PLACE;
906 /* This was never tested by Intel 906 /* This was never tested by Intel
907 * for more than one dst buffer, I think. */ 907 * for more than one dst buffer, I think. */
908 BUG_ON(req->dst->length < nbytes);
909 req_ctx->dst = NULL; 908 req_ctx->dst = NULL;
910 if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook, 909 if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
911 flags, DMA_FROM_DEVICE)) 910 flags, DMA_FROM_DEVICE))
diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c
index 67f80813a06f..e4311ce0cd78 100644
--- a/drivers/crypto/nx/nx-aes-ccm.c
+++ b/drivers/crypto/nx/nx-aes-ccm.c
@@ -494,8 +494,9 @@ out:
494static int ccm4309_aes_nx_encrypt(struct aead_request *req) 494static int ccm4309_aes_nx_encrypt(struct aead_request *req)
495{ 495{
496 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); 496 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
497 struct nx_gcm_rctx *rctx = aead_request_ctx(req);
497 struct blkcipher_desc desc; 498 struct blkcipher_desc desc;
498 u8 *iv = nx_ctx->priv.ccm.iv; 499 u8 *iv = rctx->iv;
499 500
500 iv[0] = 3; 501 iv[0] = 3;
501 memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3); 502 memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
@@ -525,8 +526,9 @@ static int ccm_aes_nx_encrypt(struct aead_request *req)
525static int ccm4309_aes_nx_decrypt(struct aead_request *req) 526static int ccm4309_aes_nx_decrypt(struct aead_request *req)
526{ 527{
527 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); 528 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
529 struct nx_gcm_rctx *rctx = aead_request_ctx(req);
528 struct blkcipher_desc desc; 530 struct blkcipher_desc desc;
529 u8 *iv = nx_ctx->priv.ccm.iv; 531 u8 *iv = rctx->iv;
530 532
531 iv[0] = 3; 533 iv[0] = 3;
532 memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3); 534 memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
diff --git a/drivers/crypto/nx/nx-aes-ctr.c b/drivers/crypto/nx/nx-aes-ctr.c
index 2617cd4d54dd..dd7e9f3f5b6b 100644
--- a/drivers/crypto/nx/nx-aes-ctr.c
+++ b/drivers/crypto/nx/nx-aes-ctr.c
@@ -72,7 +72,7 @@ static int ctr3686_aes_nx_set_key(struct crypto_tfm *tfm,
72 if (key_len < CTR_RFC3686_NONCE_SIZE) 72 if (key_len < CTR_RFC3686_NONCE_SIZE)
73 return -EINVAL; 73 return -EINVAL;
74 74
75 memcpy(nx_ctx->priv.ctr.iv, 75 memcpy(nx_ctx->priv.ctr.nonce,
76 in_key + key_len - CTR_RFC3686_NONCE_SIZE, 76 in_key + key_len - CTR_RFC3686_NONCE_SIZE,
77 CTR_RFC3686_NONCE_SIZE); 77 CTR_RFC3686_NONCE_SIZE);
78 78
@@ -131,14 +131,15 @@ static int ctr3686_aes_nx_crypt(struct blkcipher_desc *desc,
131 unsigned int nbytes) 131 unsigned int nbytes)
132{ 132{
133 struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm); 133 struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
134 u8 *iv = nx_ctx->priv.ctr.iv; 134 u8 iv[16];
135 135
136 memcpy(iv, nx_ctx->priv.ctr.nonce, CTR_RFC3686_IV_SIZE);
136 memcpy(iv + CTR_RFC3686_NONCE_SIZE, 137 memcpy(iv + CTR_RFC3686_NONCE_SIZE,
137 desc->info, CTR_RFC3686_IV_SIZE); 138 desc->info, CTR_RFC3686_IV_SIZE);
138 iv[12] = iv[13] = iv[14] = 0; 139 iv[12] = iv[13] = iv[14] = 0;
139 iv[15] = 1; 140 iv[15] = 1;
140 141
141 desc->info = nx_ctx->priv.ctr.iv; 142 desc->info = iv;
142 143
143 return ctr_aes_nx_crypt(desc, dst, src, nbytes); 144 return ctr_aes_nx_crypt(desc, dst, src, nbytes);
144} 145}
diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c
index 08ac6d48688c..92c993f08213 100644
--- a/drivers/crypto/nx/nx-aes-gcm.c
+++ b/drivers/crypto/nx/nx-aes-gcm.c
@@ -317,6 +317,7 @@ out:
317static int gcm_aes_nx_crypt(struct aead_request *req, int enc) 317static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
318{ 318{
319 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); 319 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
320 struct nx_gcm_rctx *rctx = aead_request_ctx(req);
320 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; 321 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
321 struct blkcipher_desc desc; 322 struct blkcipher_desc desc;
322 unsigned int nbytes = req->cryptlen; 323 unsigned int nbytes = req->cryptlen;
@@ -326,7 +327,7 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
326 327
327 spin_lock_irqsave(&nx_ctx->lock, irq_flags); 328 spin_lock_irqsave(&nx_ctx->lock, irq_flags);
328 329
329 desc.info = nx_ctx->priv.gcm.iv; 330 desc.info = rctx->iv;
330 /* initialize the counter */ 331 /* initialize the counter */
331 *(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1; 332 *(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1;
332 333
@@ -424,8 +425,8 @@ out:
424 425
425static int gcm_aes_nx_encrypt(struct aead_request *req) 426static int gcm_aes_nx_encrypt(struct aead_request *req)
426{ 427{
427 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); 428 struct nx_gcm_rctx *rctx = aead_request_ctx(req);
428 char *iv = nx_ctx->priv.gcm.iv; 429 char *iv = rctx->iv;
429 430
430 memcpy(iv, req->iv, 12); 431 memcpy(iv, req->iv, 12);
431 432
@@ -434,8 +435,8 @@ static int gcm_aes_nx_encrypt(struct aead_request *req)
434 435
435static int gcm_aes_nx_decrypt(struct aead_request *req) 436static int gcm_aes_nx_decrypt(struct aead_request *req)
436{ 437{
437 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); 438 struct nx_gcm_rctx *rctx = aead_request_ctx(req);
438 char *iv = nx_ctx->priv.gcm.iv; 439 char *iv = rctx->iv;
439 440
440 memcpy(iv, req->iv, 12); 441 memcpy(iv, req->iv, 12);
441 442
@@ -445,7 +446,8 @@ static int gcm_aes_nx_decrypt(struct aead_request *req)
445static int gcm4106_aes_nx_encrypt(struct aead_request *req) 446static int gcm4106_aes_nx_encrypt(struct aead_request *req)
446{ 447{
447 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); 448 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
448 char *iv = nx_ctx->priv.gcm.iv; 449 struct nx_gcm_rctx *rctx = aead_request_ctx(req);
450 char *iv = rctx->iv;
449 char *nonce = nx_ctx->priv.gcm.nonce; 451 char *nonce = nx_ctx->priv.gcm.nonce;
450 452
451 memcpy(iv, nonce, NX_GCM4106_NONCE_LEN); 453 memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
@@ -457,7 +459,8 @@ static int gcm4106_aes_nx_encrypt(struct aead_request *req)
457static int gcm4106_aes_nx_decrypt(struct aead_request *req) 459static int gcm4106_aes_nx_decrypt(struct aead_request *req)
458{ 460{
459 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); 461 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
460 char *iv = nx_ctx->priv.gcm.iv; 462 struct nx_gcm_rctx *rctx = aead_request_ctx(req);
463 char *iv = rctx->iv;
461 char *nonce = nx_ctx->priv.gcm.nonce; 464 char *nonce = nx_ctx->priv.gcm.nonce;
462 465
463 memcpy(iv, nonce, NX_GCM4106_NONCE_LEN); 466 memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
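
Across the nx-aes-ccm and nx-aes-gcm hunks, the IV buffer moves out of the shared per-transform context (nx_ctx->priv....iv) into a per-request context (rctx->iv), so concurrent requests on the same tfm no longer overwrite each other's IV. A minimal sketch of that separation; the struct names and build_iv() helper are invented for illustration only.

#include <stdio.h>
#include <string.h>

/* Shared, long-lived state: safe only for data that is identical for
 * every request (here: a nonce derived from the key, as in RFC 4106). */
struct tfm_ctx {
	unsigned char nonce[4];
};

/* Per-request state: each in-flight request gets its own copy, so the
 * IV assembled here cannot be clobbered by a concurrent request. */
struct req_ctx {
	unsigned char iv[16];
};

static void build_iv(const struct tfm_ctx *tfm, struct req_ctx *req,
		     const unsigned char *req_iv, size_t req_iv_len)
{
	memcpy(req->iv, tfm->nonce, sizeof(tfm->nonce));
	memcpy(req->iv + sizeof(tfm->nonce), req_iv, req_iv_len);
	memset(req->iv + sizeof(tfm->nonce) + req_iv_len, 0,
	       sizeof(req->iv) - sizeof(tfm->nonce) - req_iv_len);
	req->iv[15] = 1;        /* initial counter value */
}

int main(void)
{
	struct tfm_ctx tfm = { .nonce = { 0xde, 0xad, 0xbe, 0xef } };
	struct req_ctx a, b;
	unsigned char iv_a[8] = { 1 }, iv_b[8] = { 2 };

	/* Two requests on the same tfm each keep a private IV. */
	build_iv(&tfm, &a, iv_a, sizeof(iv_a));
	build_iv(&tfm, &b, iv_b, sizeof(iv_b));

	printf("request A iv[4]=%u, request B iv[4]=%u\n", a.iv[4], b.iv[4]);
	return 0;
}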
diff --git a/drivers/crypto/nx/nx-aes-xcbc.c b/drivers/crypto/nx/nx-aes-xcbc.c
index 8c2faffab4a3..c2f7d4befb55 100644
--- a/drivers/crypto/nx/nx-aes-xcbc.c
+++ b/drivers/crypto/nx/nx-aes-xcbc.c
@@ -42,6 +42,7 @@ static int nx_xcbc_set_key(struct crypto_shash *desc,
42 unsigned int key_len) 42 unsigned int key_len)
43{ 43{
44 struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc); 44 struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc);
45 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
45 46
46 switch (key_len) { 47 switch (key_len) {
47 case AES_KEYSIZE_128: 48 case AES_KEYSIZE_128:
@@ -51,7 +52,7 @@ static int nx_xcbc_set_key(struct crypto_shash *desc,
51 return -EINVAL; 52 return -EINVAL;
52 } 53 }
53 54
54 memcpy(nx_ctx->priv.xcbc.key, in_key, key_len); 55 memcpy(csbcpb->cpb.aes_xcbc.key, in_key, key_len);
55 56
56 return 0; 57 return 0;
57} 58}
@@ -148,32 +149,29 @@ out:
148 return rc; 149 return rc;
149} 150}
150 151
151static int nx_xcbc_init(struct shash_desc *desc) 152static int nx_crypto_ctx_aes_xcbc_init2(struct crypto_tfm *tfm)
152{ 153{
153 struct xcbc_state *sctx = shash_desc_ctx(desc); 154 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
154 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
155 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; 155 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
156 struct nx_sg *out_sg; 156 int err;
157 int len;
158 157
159 nx_ctx_init(nx_ctx, HCOP_FC_AES); 158 err = nx_crypto_ctx_aes_xcbc_init(tfm);
159 if (err)
160 return err;
160 161
161 memset(sctx, 0, sizeof *sctx); 162 nx_ctx_init(nx_ctx, HCOP_FC_AES);
162 163
163 NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128); 164 NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
164 csbcpb->cpb.hdr.mode = NX_MODE_AES_XCBC_MAC; 165 csbcpb->cpb.hdr.mode = NX_MODE_AES_XCBC_MAC;
165 166
166 memcpy(csbcpb->cpb.aes_xcbc.key, nx_ctx->priv.xcbc.key, AES_BLOCK_SIZE); 167 return 0;
167 memset(nx_ctx->priv.xcbc.key, 0, sizeof *nx_ctx->priv.xcbc.key); 168}
168
169 len = AES_BLOCK_SIZE;
170 out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
171 &len, nx_ctx->ap->sglen);
172 169
173 if (len != AES_BLOCK_SIZE) 170static int nx_xcbc_init(struct shash_desc *desc)
174 return -EINVAL; 171{
172 struct xcbc_state *sctx = shash_desc_ctx(desc);
175 173
176 nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); 174 memset(sctx, 0, sizeof *sctx);
177 175
178 return 0; 176 return 0;
179} 177}
@@ -186,6 +184,7 @@ static int nx_xcbc_update(struct shash_desc *desc,
186 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); 184 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
187 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; 185 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
188 struct nx_sg *in_sg; 186 struct nx_sg *in_sg;
187 struct nx_sg *out_sg;
189 u32 to_process = 0, leftover, total; 188 u32 to_process = 0, leftover, total;
190 unsigned int max_sg_len; 189 unsigned int max_sg_len;
191 unsigned long irq_flags; 190 unsigned long irq_flags;
@@ -213,6 +212,17 @@ static int nx_xcbc_update(struct shash_desc *desc,
213 max_sg_len = min_t(u64, max_sg_len, 212 max_sg_len = min_t(u64, max_sg_len,
214 nx_ctx->ap->databytelen/NX_PAGE_SIZE); 213 nx_ctx->ap->databytelen/NX_PAGE_SIZE);
215 214
215 data_len = AES_BLOCK_SIZE;
216 out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
217 &len, nx_ctx->ap->sglen);
218
219 if (data_len != AES_BLOCK_SIZE) {
220 rc = -EINVAL;
221 goto out;
222 }
223
224 nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
225
216 do { 226 do {
217 to_process = total - to_process; 227 to_process = total - to_process;
218 to_process = to_process & ~(AES_BLOCK_SIZE - 1); 228 to_process = to_process & ~(AES_BLOCK_SIZE - 1);
@@ -235,8 +245,10 @@ static int nx_xcbc_update(struct shash_desc *desc,
235 (u8 *) sctx->buffer, 245 (u8 *) sctx->buffer,
236 &data_len, 246 &data_len,
237 max_sg_len); 247 max_sg_len);
238 if (data_len != sctx->count) 248 if (data_len != sctx->count) {
239 return -EINVAL; 249 rc = -EINVAL;
250 goto out;
251 }
240 } 252 }
241 253
242 data_len = to_process - sctx->count; 254 data_len = to_process - sctx->count;
@@ -245,8 +257,10 @@ static int nx_xcbc_update(struct shash_desc *desc,
245 &data_len, 257 &data_len,
246 max_sg_len); 258 max_sg_len);
247 259
248 if (data_len != to_process - sctx->count) 260 if (data_len != to_process - sctx->count) {
249 return -EINVAL; 261 rc = -EINVAL;
262 goto out;
263 }
250 264
251 nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * 265 nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
252 sizeof(struct nx_sg); 266 sizeof(struct nx_sg);
@@ -325,15 +339,19 @@ static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
325 in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buffer, 339 in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buffer,
326 &len, nx_ctx->ap->sglen); 340 &len, nx_ctx->ap->sglen);
327 341
328 if (len != sctx->count) 342 if (len != sctx->count) {
329 return -EINVAL; 343 rc = -EINVAL;
344 goto out;
345 }
330 346
331 len = AES_BLOCK_SIZE; 347 len = AES_BLOCK_SIZE;
332 out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len, 348 out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len,
333 nx_ctx->ap->sglen); 349 nx_ctx->ap->sglen);
334 350
335 if (len != AES_BLOCK_SIZE) 351 if (len != AES_BLOCK_SIZE) {
336 return -EINVAL; 352 rc = -EINVAL;
353 goto out;
354 }
337 355
338 nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); 356 nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
339 nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); 357 nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
@@ -372,7 +390,7 @@ struct shash_alg nx_shash_aes_xcbc_alg = {
372 .cra_blocksize = AES_BLOCK_SIZE, 390 .cra_blocksize = AES_BLOCK_SIZE,
373 .cra_module = THIS_MODULE, 391 .cra_module = THIS_MODULE,
374 .cra_ctxsize = sizeof(struct nx_crypto_ctx), 392 .cra_ctxsize = sizeof(struct nx_crypto_ctx),
375 .cra_init = nx_crypto_ctx_aes_xcbc_init, 393 .cra_init = nx_crypto_ctx_aes_xcbc_init2,
376 .cra_exit = nx_crypto_ctx_exit, 394 .cra_exit = nx_crypto_ctx_exit,
377 } 395 }
378}; 396};
diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c
index 4e91bdb83c59..becb738c897b 100644
--- a/drivers/crypto/nx/nx-sha256.c
+++ b/drivers/crypto/nx/nx-sha256.c
@@ -29,34 +29,28 @@
29#include "nx.h" 29#include "nx.h"
30 30
31 31
32static int nx_sha256_init(struct shash_desc *desc) 32static int nx_crypto_ctx_sha256_init(struct crypto_tfm *tfm)
33{ 33{
34 struct sha256_state *sctx = shash_desc_ctx(desc); 34 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
35 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); 35 int err;
36 struct nx_sg *out_sg;
37 int len;
38 u32 max_sg_len;
39 36
40 nx_ctx_init(nx_ctx, HCOP_FC_SHA); 37 err = nx_crypto_ctx_sha_init(tfm);
38 if (err)
39 return err;
41 40
42 memset(sctx, 0, sizeof *sctx); 41 nx_ctx_init(nx_ctx, HCOP_FC_SHA);
43 42
44 nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA256]; 43 nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA256];
45 44
46 NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA256); 45 NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA256);
47 46
48 max_sg_len = min_t(u64, nx_ctx->ap->sglen, 47 return 0;
49 nx_driver.of.max_sg_len/sizeof(struct nx_sg)); 48}
50 max_sg_len = min_t(u64, max_sg_len,
51 nx_ctx->ap->databytelen/NX_PAGE_SIZE);
52 49
53 len = SHA256_DIGEST_SIZE; 50static int nx_sha256_init(struct shash_desc *desc) {
54 out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state, 51 struct sha256_state *sctx = shash_desc_ctx(desc);
55 &len, max_sg_len);
56 nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
57 52
58 if (len != SHA256_DIGEST_SIZE) 53 memset(sctx, 0, sizeof *sctx);
59 return -EINVAL;
60 54
61 sctx->state[0] = __cpu_to_be32(SHA256_H0); 55 sctx->state[0] = __cpu_to_be32(SHA256_H0);
62 sctx->state[1] = __cpu_to_be32(SHA256_H1); 56 sctx->state[1] = __cpu_to_be32(SHA256_H1);
@@ -77,7 +71,7 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
77 struct sha256_state *sctx = shash_desc_ctx(desc); 71 struct sha256_state *sctx = shash_desc_ctx(desc);
78 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); 72 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
79 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; 73 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
80 struct nx_sg *in_sg; 74 struct nx_sg *out_sg;
81 u64 to_process = 0, leftover, total; 75 u64 to_process = 0, leftover, total;
82 unsigned long irq_flags; 76 unsigned long irq_flags;
83 int rc = 0; 77 int rc = 0;
@@ -102,24 +96,28 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
102 NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; 96 NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
103 NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; 97 NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
104 98
105 in_sg = nx_ctx->in_sg;
106 max_sg_len = min_t(u64, nx_ctx->ap->sglen, 99 max_sg_len = min_t(u64, nx_ctx->ap->sglen,
107 nx_driver.of.max_sg_len/sizeof(struct nx_sg)); 100 nx_driver.of.max_sg_len/sizeof(struct nx_sg));
108 max_sg_len = min_t(u64, max_sg_len, 101 max_sg_len = min_t(u64, max_sg_len,
109 nx_ctx->ap->databytelen/NX_PAGE_SIZE); 102 nx_ctx->ap->databytelen/NX_PAGE_SIZE);
110 103
104 data_len = SHA256_DIGEST_SIZE;
105 out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
106 &data_len, max_sg_len);
107 nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
108
109 if (data_len != SHA256_DIGEST_SIZE) {
110 rc = -EINVAL;
111 goto out;
112 }
113
111 do { 114 do {
112 /* 115 int used_sgs = 0;
113 * to_process: the SHA256_BLOCK_SIZE data chunk to process in 116 struct nx_sg *in_sg = nx_ctx->in_sg;
114 * this update. This value is also restricted by the sg list
115 * limits.
116 */
117 to_process = total - to_process;
118 to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);
119 117
120 if (buf_len) { 118 if (buf_len) {
121 data_len = buf_len; 119 data_len = buf_len;
122 in_sg = nx_build_sg_list(nx_ctx->in_sg, 120 in_sg = nx_build_sg_list(in_sg,
123 (u8 *) sctx->buf, 121 (u8 *) sctx->buf,
124 &data_len, 122 &data_len,
125 max_sg_len); 123 max_sg_len);
@@ -128,15 +126,27 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
128 rc = -EINVAL; 126 rc = -EINVAL;
129 goto out; 127 goto out;
130 } 128 }
129 used_sgs = in_sg - nx_ctx->in_sg;
131 } 130 }
132 131
132 /* to_process: SHA256_BLOCK_SIZE aligned chunk to be
133 * processed in this iteration. This value is restricted
134 * by sg list limits and number of sgs we already used
135 * for leftover data. (see above)
136 * In ideal case, we could allow NX_PAGE_SIZE * max_sg_len,
137 * but because data may not be aligned, we need to account
138 * for that too. */
139 to_process = min_t(u64, total,
140 (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
141 to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);
142
133 data_len = to_process - buf_len; 143 data_len = to_process - buf_len;
134 in_sg = nx_build_sg_list(in_sg, (u8 *) data, 144 in_sg = nx_build_sg_list(in_sg, (u8 *) data,
135 &data_len, max_sg_len); 145 &data_len, max_sg_len);
136 146
137 nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); 147 nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
138 148
139 to_process = (data_len + buf_len); 149 to_process = data_len + buf_len;
140 leftover = total - to_process; 150 leftover = total - to_process;
141 151
142 /* 152 /*
@@ -282,7 +292,7 @@ struct shash_alg nx_shash_sha256_alg = {
282 .cra_blocksize = SHA256_BLOCK_SIZE, 292 .cra_blocksize = SHA256_BLOCK_SIZE,
283 .cra_module = THIS_MODULE, 293 .cra_module = THIS_MODULE,
284 .cra_ctxsize = sizeof(struct nx_crypto_ctx), 294 .cra_ctxsize = sizeof(struct nx_crypto_ctx),
285 .cra_init = nx_crypto_ctx_sha_init, 295 .cra_init = nx_crypto_ctx_sha256_init,
286 .cra_exit = nx_crypto_ctx_exit, 296 .cra_exit = nx_crypto_ctx_exit,
287 } 297 }
288}; 298};
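
The nx-sha256 rework above (and the matching nx-sha512 change that follows) stops assuming the whole remaining input fits in one scatter-gather pass: each iteration now bounds to_process by the number of sg entries still available after any buffered leftover data, then rounds down to a block multiple, exactly as the new in-code comment describes. The arithmetic, extracted into a standalone helper with illustrative constants and names rather than the driver's:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE  4096u
#define BLOCK_SIZE 64u          /* SHA-256 block size */

/* How many bytes may be hashed in this pass, given the sg-list budget.
 * max_sg entries exist in total; one is held back for worst-case
 * alignment and used_sgs were already consumed by buffered leftovers. */
static uint64_t bound_to_process(uint64_t total, unsigned int max_sg,
				 unsigned int used_sgs)
{
	uint64_t budget = (uint64_t)(max_sg - 1 - used_sgs) * PAGE_SIZE;
	uint64_t to_process = total < budget ? total : budget;

	return to_process & ~(uint64_t)(BLOCK_SIZE - 1);        /* block align */
}

int main(void)
{
	/* A large update with one sg entry already holding leftover data:
	 * limited by the sg budget. */
	printf("%llu\n", (unsigned long long)bound_to_process(10u << 20, 32, 1));
	/* A small update: limited by the data itself, rounded to blocks. */
	printf("%llu\n", (unsigned long long)bound_to_process(100, 32, 0));
	return 0;
}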
diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c
index e6a58d2ee628..b6e183d58d73 100644
--- a/drivers/crypto/nx/nx-sha512.c
+++ b/drivers/crypto/nx/nx-sha512.c
@@ -28,34 +28,29 @@
28#include "nx.h" 28#include "nx.h"
29 29
30 30
31static int nx_sha512_init(struct shash_desc *desc) 31static int nx_crypto_ctx_sha512_init(struct crypto_tfm *tfm)
32{ 32{
33 struct sha512_state *sctx = shash_desc_ctx(desc); 33 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
34 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); 34 int err;
35 struct nx_sg *out_sg;
36 int len;
37 u32 max_sg_len;
38 35
39 nx_ctx_init(nx_ctx, HCOP_FC_SHA); 36 err = nx_crypto_ctx_sha_init(tfm);
37 if (err)
38 return err;
40 39
41 memset(sctx, 0, sizeof *sctx); 40 nx_ctx_init(nx_ctx, HCOP_FC_SHA);
42 41
43 nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA512]; 42 nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA512];
44 43
45 NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA512); 44 NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA512);
46 45
47 max_sg_len = min_t(u64, nx_ctx->ap->sglen, 46 return 0;
48 nx_driver.of.max_sg_len/sizeof(struct nx_sg)); 47}
49 max_sg_len = min_t(u64, max_sg_len,
50 nx_ctx->ap->databytelen/NX_PAGE_SIZE);
51 48
52 len = SHA512_DIGEST_SIZE; 49static int nx_sha512_init(struct shash_desc *desc)
53 out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state, 50{
54 &len, max_sg_len); 51 struct sha512_state *sctx = shash_desc_ctx(desc);
55 nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
56 52
57 if (len != SHA512_DIGEST_SIZE) 53 memset(sctx, 0, sizeof *sctx);
58 return -EINVAL;
59 54
60 sctx->state[0] = __cpu_to_be64(SHA512_H0); 55 sctx->state[0] = __cpu_to_be64(SHA512_H0);
61 sctx->state[1] = __cpu_to_be64(SHA512_H1); 56 sctx->state[1] = __cpu_to_be64(SHA512_H1);
@@ -76,7 +71,7 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
76 struct sha512_state *sctx = shash_desc_ctx(desc); 71 struct sha512_state *sctx = shash_desc_ctx(desc);
77 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); 72 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
78 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; 73 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
79 struct nx_sg *in_sg; 74 struct nx_sg *out_sg;
80 u64 to_process, leftover = 0, total; 75 u64 to_process, leftover = 0, total;
81 unsigned long irq_flags; 76 unsigned long irq_flags;
82 int rc = 0; 77 int rc = 0;
@@ -101,25 +96,28 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
101 NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; 96 NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
102 NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; 97 NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
103 98
104 in_sg = nx_ctx->in_sg;
105 max_sg_len = min_t(u64, nx_ctx->ap->sglen, 99 max_sg_len = min_t(u64, nx_ctx->ap->sglen,
106 nx_driver.of.max_sg_len/sizeof(struct nx_sg)); 100 nx_driver.of.max_sg_len/sizeof(struct nx_sg));
107 max_sg_len = min_t(u64, max_sg_len, 101 max_sg_len = min_t(u64, max_sg_len,
108 nx_ctx->ap->databytelen/NX_PAGE_SIZE); 102 nx_ctx->ap->databytelen/NX_PAGE_SIZE);
109 103
104 data_len = SHA512_DIGEST_SIZE;
105 out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
106 &data_len, max_sg_len);
107 nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
108
109 if (data_len != SHA512_DIGEST_SIZE) {
110 rc = -EINVAL;
111 goto out;
112 }
113
110 do { 114 do {
111 /* 115 int used_sgs = 0;
112 * to_process: the SHA512_BLOCK_SIZE data chunk to process in 116 struct nx_sg *in_sg = nx_ctx->in_sg;
113 * this update. This value is also restricted by the sg list
114 * limits.
115 */
116 to_process = total - leftover;
117 to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);
118 leftover = total - to_process;
119 117
120 if (buf_len) { 118 if (buf_len) {
121 data_len = buf_len; 119 data_len = buf_len;
122 in_sg = nx_build_sg_list(nx_ctx->in_sg, 120 in_sg = nx_build_sg_list(in_sg,
123 (u8 *) sctx->buf, 121 (u8 *) sctx->buf,
124 &data_len, max_sg_len); 122 &data_len, max_sg_len);
125 123
@@ -127,8 +125,20 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
127 rc = -EINVAL; 125 rc = -EINVAL;
128 goto out; 126 goto out;
129 } 127 }
128 used_sgs = in_sg - nx_ctx->in_sg;
130 } 129 }
131 130
131 /* to_process: SHA512_BLOCK_SIZE aligned chunk to be
132 * processed in this iteration. This value is restricted
 133		 * by the sg list limits and the number of sgs we already used
 134		 * for leftover data (see above).
 135		 * In the ideal case, we could allow NX_PAGE_SIZE * max_sg_len,
136 * but because data may not be aligned, we need to account
137 * for that too. */
138 to_process = min_t(u64, total,
139 (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
140 to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);
141
132 data_len = to_process - buf_len; 142 data_len = to_process - buf_len;
133 in_sg = nx_build_sg_list(in_sg, (u8 *) data, 143 in_sg = nx_build_sg_list(in_sg, (u8 *) data,
134 &data_len, max_sg_len); 144 &data_len, max_sg_len);
@@ -140,7 +150,7 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
140 goto out; 150 goto out;
141 } 151 }
142 152
143 to_process = (data_len + buf_len); 153 to_process = data_len + buf_len;
144 leftover = total - to_process; 154 leftover = total - to_process;
145 155
146 /* 156 /*
@@ -288,7 +298,7 @@ struct shash_alg nx_shash_sha512_alg = {
288 .cra_blocksize = SHA512_BLOCK_SIZE, 298 .cra_blocksize = SHA512_BLOCK_SIZE,
289 .cra_module = THIS_MODULE, 299 .cra_module = THIS_MODULE,
290 .cra_ctxsize = sizeof(struct nx_crypto_ctx), 300 .cra_ctxsize = sizeof(struct nx_crypto_ctx),
291 .cra_init = nx_crypto_ctx_sha_init, 301 .cra_init = nx_crypto_ctx_sha512_init,
292 .cra_exit = nx_crypto_ctx_exit, 302 .cra_exit = nx_crypto_ctx_exit,
293 } 303 }
294}; 304};
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
index f6198f29a4a8..436971343ff7 100644
--- a/drivers/crypto/nx/nx.c
+++ b/drivers/crypto/nx/nx.c
@@ -713,12 +713,15 @@ static int nx_crypto_ctx_init(struct nx_crypto_ctx *nx_ctx, u32 fc, u32 mode)
713/* entry points from the crypto tfm initializers */ 713/* entry points from the crypto tfm initializers */
714int nx_crypto_ctx_aes_ccm_init(struct crypto_tfm *tfm) 714int nx_crypto_ctx_aes_ccm_init(struct crypto_tfm *tfm)
715{ 715{
716 crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
717 sizeof(struct nx_ccm_rctx));
716 return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES, 718 return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
717 NX_MODE_AES_CCM); 719 NX_MODE_AES_CCM);
718} 720}
719 721
720int nx_crypto_ctx_aes_gcm_init(struct crypto_aead *tfm) 722int nx_crypto_ctx_aes_gcm_init(struct crypto_aead *tfm)
721{ 723{
724 crypto_aead_set_reqsize(tfm, sizeof(struct nx_gcm_rctx));
722 return nx_crypto_ctx_init(crypto_aead_ctx(tfm), NX_FC_AES, 725 return nx_crypto_ctx_init(crypto_aead_ctx(tfm), NX_FC_AES,
723 NX_MODE_AES_GCM); 726 NX_MODE_AES_GCM);
724} 727}
diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h
index de3ea8738146..cdff03a42ae7 100644
--- a/drivers/crypto/nx/nx.h
+++ b/drivers/crypto/nx/nx.h
@@ -2,6 +2,8 @@
2#ifndef __NX_H__ 2#ifndef __NX_H__
3#define __NX_H__ 3#define __NX_H__
4 4
5#include <crypto/ctr.h>
6
5#define NX_NAME "nx-crypto" 7#define NX_NAME "nx-crypto"
6#define NX_STRING "IBM Power7+ Nest Accelerator Crypto Driver" 8#define NX_STRING "IBM Power7+ Nest Accelerator Crypto Driver"
7#define NX_VERSION "1.0" 9#define NX_VERSION "1.0"
@@ -91,8 +93,11 @@ struct nx_crypto_driver {
91 93
92#define NX_GCM4106_NONCE_LEN (4) 94#define NX_GCM4106_NONCE_LEN (4)
93#define NX_GCM_CTR_OFFSET (12) 95#define NX_GCM_CTR_OFFSET (12)
94struct nx_gcm_priv { 96struct nx_gcm_rctx {
95 u8 iv[16]; 97 u8 iv[16];
98};
99
100struct nx_gcm_priv {
96 u8 iauth_tag[16]; 101 u8 iauth_tag[16];
97 u8 nonce[NX_GCM4106_NONCE_LEN]; 102 u8 nonce[NX_GCM4106_NONCE_LEN];
98}; 103};
@@ -100,8 +105,11 @@ struct nx_gcm_priv {
100#define NX_CCM_AES_KEY_LEN (16) 105#define NX_CCM_AES_KEY_LEN (16)
101#define NX_CCM4309_AES_KEY_LEN (19) 106#define NX_CCM4309_AES_KEY_LEN (19)
102#define NX_CCM4309_NONCE_LEN (3) 107#define NX_CCM4309_NONCE_LEN (3)
103struct nx_ccm_priv { 108struct nx_ccm_rctx {
104 u8 iv[16]; 109 u8 iv[16];
110};
111
112struct nx_ccm_priv {
105 u8 b0[16]; 113 u8 b0[16];
106 u8 iauth_tag[16]; 114 u8 iauth_tag[16];
107 u8 oauth_tag[16]; 115 u8 oauth_tag[16];
@@ -113,7 +121,7 @@ struct nx_xcbc_priv {
113}; 121};
114 122
115struct nx_ctr_priv { 123struct nx_ctr_priv {
116 u8 iv[16]; 124 u8 nonce[CTR_RFC3686_NONCE_SIZE];
117}; 125};
118 126
119struct nx_crypto_ctx { 127struct nx_crypto_ctx {
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index 46307098f8ba..0a70e46d5416 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -536,9 +536,6 @@ static int omap_des_crypt_dma_stop(struct omap_des_dev *dd)
536 dmaengine_terminate_all(dd->dma_lch_in); 536 dmaengine_terminate_all(dd->dma_lch_in);
537 dmaengine_terminate_all(dd->dma_lch_out); 537 dmaengine_terminate_all(dd->dma_lch_out);
538 538
539 dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
540 dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len, DMA_FROM_DEVICE);
541
542 return err; 539 return err;
543} 540}
544 541
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 067402c7c2a9..df427c0e9e7b 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -73,7 +73,8 @@
73 ICP_QAT_HW_CIPHER_KEY_CONVERT, \ 73 ICP_QAT_HW_CIPHER_KEY_CONVERT, \
74 ICP_QAT_HW_CIPHER_DECRYPT) 74 ICP_QAT_HW_CIPHER_DECRYPT)
75 75
76static atomic_t active_dev; 76static DEFINE_MUTEX(algs_lock);
77static unsigned int active_devs;
77 78
78struct qat_alg_buf { 79struct qat_alg_buf {
79 uint32_t len; 80 uint32_t len;
@@ -1280,7 +1281,10 @@ static struct crypto_alg qat_algs[] = { {
1280 1281
1281int qat_algs_register(void) 1282int qat_algs_register(void)
1282{ 1283{
1283 if (atomic_add_return(1, &active_dev) == 1) { 1284 int ret = 0;
1285
1286 mutex_lock(&algs_lock);
1287 if (++active_devs == 1) {
1284 int i; 1288 int i;
1285 1289
1286 for (i = 0; i < ARRAY_SIZE(qat_algs); i++) 1290 for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
@@ -1289,21 +1293,25 @@ int qat_algs_register(void)
1289 CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC : 1293 CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC :
1290 CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC; 1294 CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
1291 1295
1292 return crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs)); 1296 ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
1293 } 1297 }
1294 return 0; 1298 mutex_unlock(&algs_lock);
1299 return ret;
1295} 1300}
1296 1301
1297int qat_algs_unregister(void) 1302int qat_algs_unregister(void)
1298{ 1303{
1299 if (atomic_sub_return(1, &active_dev) == 0) 1304 int ret = 0;
1300 return crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs)); 1305
1301 return 0; 1306 mutex_lock(&algs_lock);
1307 if (--active_devs == 0)
1308 ret = crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
1309 mutex_unlock(&algs_lock);
1310 return ret;
1302} 1311}
1303 1312
1304int qat_algs_init(void) 1313int qat_algs_init(void)
1305{ 1314{
1306 atomic_set(&active_dev, 0);
1307 crypto_get_default_rng(); 1315 crypto_get_default_rng();
1308 return 0; 1316 return 0;
1309} 1317}
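
The qat change above replaces the bare atomic counter with a device count guarded by a mutex, so the count, the registration call and its return code are updated under one lock. A standalone sketch of that refcounted register/unregister pattern, using a pthread mutex and hypothetical do_register()/do_unregister() stubs in place of the crypto API calls:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t algs_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int active_devs;

    static int do_register(void)   { puts("register algs");   return 0; }
    static int do_unregister(void) { puts("unregister algs"); return 0; }

    /* Register on the first device, keep a refcount for the rest. */
    int algs_register(void)
    {
            int ret = 0;

            pthread_mutex_lock(&algs_lock);
            if (++active_devs == 1)
                    ret = do_register();
            pthread_mutex_unlock(&algs_lock);
            return ret;
    }

    /* Unregister only when the last device goes away. */
    int algs_unregister(void)
    {
            int ret = 0;

            pthread_mutex_lock(&algs_lock);
            if (--active_devs == 0)
                    ret = do_unregister();
            pthread_mutex_unlock(&algs_lock);
            return ret;
    }

    int main(void)
    {
            algs_register();    /* first device: registers */
            algs_register();    /* second device: refcount only */
            algs_unregister();
            algs_unregister();  /* last device: unregisters */
            return 0;
    }

The point of the pattern is that only the first caller registers and only the last caller unregisters, with the counter and the action covered by the same lock.
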
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 59892126d175..d3629b7482dd 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -48,6 +48,8 @@
48 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\ 48 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
49 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) 49 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
50 50
51#define ATC_MAX_DSCR_TRIALS 10
52
51/* 53/*
52 * Initial number of descriptors to allocate for each channel. This could 54 * Initial number of descriptors to allocate for each channel. This could
53 * be increased during dma usage. 55 * be increased during dma usage.
@@ -285,28 +287,19 @@ static struct at_desc *atc_get_desc_by_cookie(struct at_dma_chan *atchan,
285 * 287 *
286 * @current_len: the number of bytes left before reading CTRLA 288 * @current_len: the number of bytes left before reading CTRLA
287 * @ctrla: the value of CTRLA 289 * @ctrla: the value of CTRLA
288 * @desc: the descriptor containing the transfer width
289 */ 290 */
290static inline int atc_calc_bytes_left(int current_len, u32 ctrla, 291static inline int atc_calc_bytes_left(int current_len, u32 ctrla)
291 struct at_desc *desc)
292{ 292{
293 return current_len - ((ctrla & ATC_BTSIZE_MAX) << desc->tx_width); 293 u32 btsize = (ctrla & ATC_BTSIZE_MAX);
294} 294 u32 src_width = ATC_REG_TO_SRC_WIDTH(ctrla);
295 295
296/** 296 /*
297 * atc_calc_bytes_left_from_reg - calculates the number of bytes left according 297 * According to the datasheet, when reading the Control A Register
298 * to the current value of CTRLA. 298 * (ctrla), the Buffer Transfer Size (btsize) bitfield refers to the
299 * 299 * number of transfers completed on the Source Interface.
300 * @current_len: the number of bytes left before reading CTRLA 300 * So btsize is always a number of source width transfers.
301 * @atchan: the channel to read CTRLA for 301 */
302 * @desc: the descriptor containing the transfer width 302 return current_len - (btsize << src_width);
303 */
304static inline int atc_calc_bytes_left_from_reg(int current_len,
305 struct at_dma_chan *atchan, struct at_desc *desc)
306{
307 u32 ctrla = channel_readl(atchan, CTRLA);
308
309 return atc_calc_bytes_left(current_len, ctrla, desc);
310} 303}
311 304
312/** 305/**
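
To make the new residue formula concrete, a tiny worked example with made-up register values (the variable names mirror the driver, but this is not driver code):

    #include <stdio.h>

    int main(void)
    {
            unsigned int current_len = 4096;  /* bytes left before reading CTRLA */
            unsigned int btsize = 100;        /* completed source-width transfers */
            unsigned int src_width = 2;       /* 2 -> word, i.e. 4-byte transfers */

            /* btsize counts source-width transfers, so shift it into bytes. */
            printf("residue = %u bytes\n", current_len - (btsize << src_width));
            return 0;
    }

With these values the residue is 4096 - (100 << 2) = 3696 bytes.
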
@@ -320,7 +313,7 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
320 struct at_desc *desc_first = atc_first_active(atchan); 313 struct at_desc *desc_first = atc_first_active(atchan);
321 struct at_desc *desc; 314 struct at_desc *desc;
322 int ret; 315 int ret;
323 u32 ctrla, dscr; 316 u32 ctrla, dscr, trials;
324 317
325 /* 318 /*
326 * If the cookie doesn't match to the currently running transfer then 319 * If the cookie doesn't match to the currently running transfer then
@@ -346,15 +339,82 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
346 * the channel's DSCR register and compare it against the value 339 * the channel's DSCR register and compare it against the value
347 * of the hardware linked list structure of each child 340 * of the hardware linked list structure of each child
348 * descriptor. 341 * descriptor.
342 *
343 * The CTRLA register provides us with the amount of data
344 * already read from the source for the current child
345 * descriptor. So we can compute a more accurate residue by also
346 * removing the number of bytes corresponding to this amount of
347 * data.
348 *
349 * However, the DSCR and CTRLA registers cannot be read both
350 * atomically. Hence a race condition may occur: the first read
351 * register may refer to one child descriptor whereas the second
352 * read may refer to a later child descriptor in the list
 353	 * because of the DMA transfer progression in between the two
354 * reads.
355 *
356 * One solution could have been to pause the DMA transfer, read
357 * the DSCR and CTRLA then resume the DMA transfer. Nonetheless,
358 * this approach presents some drawbacks:
359 * - If the DMA transfer is paused, RX overruns or TX underruns
 360	 *   are more likely to occur depending on the system latency.
361 * Taking the USART driver as an example, it uses a cyclic DMA
362 * transfer to read data from the Receive Holding Register
363 * (RHR) to avoid RX overruns since the RHR is not protected
364 * by any FIFO on most Atmel SoCs. So pausing the DMA transfer
365 * to compute the residue would break the USART driver design.
366 * - The atc_pause() function masks interrupts but we'd rather
 367	 *   avoid doing so for system latency purposes.
368 *
369 * Then we'd rather use another solution: the DSCR is read a
370 * first time, the CTRLA is read in turn, next the DSCR is read
371 * a second time. If the two consecutive read values of the DSCR
 372	 * are the same then we assume both refer to the very same
 373	 * child descriptor, as does the CTRLA value read in between.
 374	 * For cyclic transfers, the assumption is that a full loop
375 * is "not so fast".
 376	 * If the two DSCR values are different, we read the CTRLA again,
 377	 * then the DSCR, till two consecutive read values of the DSCR are
 378	 * equal or till the maximum number of trials is reached.
 379	 * It is very unlikely that this algorithm fails to find a stable
 380	 * value for DSCR.
349 */ 381 */
350 382
351 ctrla = channel_readl(atchan, CTRLA);
352 rmb(); /* ensure CTRLA is read before DSCR */
353 dscr = channel_readl(atchan, DSCR); 383 dscr = channel_readl(atchan, DSCR);
384 rmb(); /* ensure DSCR is read before CTRLA */
385 ctrla = channel_readl(atchan, CTRLA);
386 for (trials = 0; trials < ATC_MAX_DSCR_TRIALS; ++trials) {
387 u32 new_dscr;
388
389 rmb(); /* ensure DSCR is read after CTRLA */
390 new_dscr = channel_readl(atchan, DSCR);
391
392 /*
393 * If the DSCR register value has not changed inside the
394 * DMA controller since the previous read, we assume
 395		 * that both the dscr and ctrla values refer to the
396 * very same descriptor.
397 */
398 if (likely(new_dscr == dscr))
399 break;
400
401 /*
402 * DSCR has changed inside the DMA controller, so the
 403		 * previously read value of CTRLA may refer to an already
 404		 * processed descriptor and hence could be outdated.
405 * We need to update ctrla to match the current
406 * descriptor.
407 */
408 dscr = new_dscr;
409 rmb(); /* ensure DSCR is read before CTRLA */
410 ctrla = channel_readl(atchan, CTRLA);
411 }
412 if (unlikely(trials >= ATC_MAX_DSCR_TRIALS))
413 return -ETIMEDOUT;
354 414
355 /* for the first descriptor we can be more accurate */ 415 /* for the first descriptor we can be more accurate */
356 if (desc_first->lli.dscr == dscr) 416 if (desc_first->lli.dscr == dscr)
357 return atc_calc_bytes_left(ret, ctrla, desc_first); 417 return atc_calc_bytes_left(ret, ctrla);
358 418
359 ret -= desc_first->len; 419 ret -= desc_first->len;
360 list_for_each_entry(desc, &desc_first->tx_list, desc_node) { 420 list_for_each_entry(desc, &desc_first->tx_list, desc_node) {
@@ -365,16 +425,14 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
365 } 425 }
366 426
367 /* 427 /*
368 * For the last descriptor in the chain we can calculate 428 * For the current descriptor in the chain we can calculate
369 * the remaining bytes using the channel's register. 429 * the remaining bytes using the channel's register.
370 * Note that the transfer width of the first and last
371 * descriptor may differ.
372 */ 430 */
373 if (!desc->lli.dscr) 431 ret = atc_calc_bytes_left(ret, ctrla);
374 ret = atc_calc_bytes_left_from_reg(ret, atchan, desc);
375 } else { 432 } else {
376 /* single transfer */ 433 /* single transfer */
377 ret = atc_calc_bytes_left_from_reg(ret, atchan, desc_first); 434 ctrla = channel_readl(atchan, CTRLA);
435 ret = atc_calc_bytes_left(ret, ctrla);
378 } 436 }
379 437
380 return ret; 438 return ret;
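
The long comment above boils down to a read-DSCR / read-CTRLA / re-read-DSCR loop that retries until two consecutive DSCR samples match. Below is a userspace sketch of that retry pattern; read_dscr()/read_ctrla() are hypothetical stand-ins for channel_readl(), and the rmb() barriers are omitted since plain function calls are used here.

    #include <stdio.h>

    #define MAX_TRIALS 10

    /* Hypothetical register reads: DSCR changes once while we sample it. */
    static unsigned int dscr_samples[] = { 0x100, 0x200, 0x200, 0x200 };
    static unsigned int dscr_idx;
    static unsigned int read_dscr(void)  { return dscr_samples[dscr_idx++]; }
    static unsigned int read_ctrla(void) { return 0x40; }

    int main(void)
    {
            unsigned int dscr, ctrla, trials;

            dscr  = read_dscr();
            ctrla = read_ctrla();
            for (trials = 0; trials < MAX_TRIALS; ++trials) {
                    unsigned int new_dscr = read_dscr();

                    /* Stable DSCR: ctrla was sampled for the same descriptor. */
                    if (new_dscr == dscr)
                            break;

                    /* DSCR moved on: resample ctrla for the new descriptor. */
                    dscr  = new_dscr;
                    ctrla = read_ctrla();
            }
            if (trials >= MAX_TRIALS)
                    return 1;

            printf("stable dscr=0x%x ctrla=0x%x after %u retries\n",
                   dscr, ctrla, trials);
            return 0;
    }

In the driver the same loop gives up with -ETIMEDOUT after ATC_MAX_DSCR_TRIALS iterations, which the sketch mirrors with the early return.
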
@@ -726,7 +784,6 @@ atc_prep_dma_interleaved(struct dma_chan *chan,
726 784
727 desc->txd.cookie = -EBUSY; 785 desc->txd.cookie = -EBUSY;
728 desc->total_len = desc->len = len; 786 desc->total_len = desc->len = len;
729 desc->tx_width = dwidth;
730 787
731 /* set end-of-link to the last link descriptor of list*/ 788 /* set end-of-link to the last link descriptor of list*/
732 set_desc_eol(desc); 789 set_desc_eol(desc);
@@ -804,10 +861,6 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
804 first->txd.cookie = -EBUSY; 861 first->txd.cookie = -EBUSY;
805 first->total_len = len; 862 first->total_len = len;
806 863
807 /* set transfer width for the calculation of the residue */
808 first->tx_width = src_width;
809 prev->tx_width = src_width;
810
811 /* set end-of-link to the last link descriptor of list*/ 864 /* set end-of-link to the last link descriptor of list*/
812 set_desc_eol(desc); 865 set_desc_eol(desc);
813 866
@@ -956,10 +1009,6 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
956 first->txd.cookie = -EBUSY; 1009 first->txd.cookie = -EBUSY;
957 first->total_len = total_len; 1010 first->total_len = total_len;
958 1011
959 /* set transfer width for the calculation of the residue */
960 first->tx_width = reg_width;
961 prev->tx_width = reg_width;
962
963 /* first link descriptor of list is responsible of flags */ 1012 /* first link descriptor of list is responsible of flags */
964 first->txd.flags = flags; /* client is in control of this ack */ 1013 first->txd.flags = flags; /* client is in control of this ack */
965 1014
@@ -1077,12 +1126,6 @@ atc_prep_dma_sg(struct dma_chan *chan,
1077 desc->txd.cookie = 0; 1126 desc->txd.cookie = 0;
1078 desc->len = len; 1127 desc->len = len;
1079 1128
1080 /*
1081 * Although we only need the transfer width for the first and
1082 * the last descriptor, its easier to set it to all descriptors.
1083 */
1084 desc->tx_width = src_width;
1085
1086 atc_desc_chain(&first, &prev, desc); 1129 atc_desc_chain(&first, &prev, desc);
1087 1130
1088 /* update the lengths and addresses for the next loop cycle */ 1131 /* update the lengths and addresses for the next loop cycle */
@@ -1256,7 +1299,6 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
1256 /* First descriptor of the chain embedds additional information */ 1299 /* First descriptor of the chain embedds additional information */
1257 first->txd.cookie = -EBUSY; 1300 first->txd.cookie = -EBUSY;
1258 first->total_len = buf_len; 1301 first->total_len = buf_len;
1259 first->tx_width = reg_width;
1260 1302
1261 return &first->txd; 1303 return &first->txd;
1262 1304
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index bc8d5ebedd19..7f5a08230f76 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -112,6 +112,7 @@
112#define ATC_SRC_WIDTH_BYTE (0x0 << 24) 112#define ATC_SRC_WIDTH_BYTE (0x0 << 24)
113#define ATC_SRC_WIDTH_HALFWORD (0x1 << 24) 113#define ATC_SRC_WIDTH_HALFWORD (0x1 << 24)
114#define ATC_SRC_WIDTH_WORD (0x2 << 24) 114#define ATC_SRC_WIDTH_WORD (0x2 << 24)
115#define ATC_REG_TO_SRC_WIDTH(r) (((r) >> 24) & 0x3)
115#define ATC_DST_WIDTH_MASK (0x3 << 28) /* Destination Single Transfer Size */ 116#define ATC_DST_WIDTH_MASK (0x3 << 28) /* Destination Single Transfer Size */
116#define ATC_DST_WIDTH(x) ((x) << 28) 117#define ATC_DST_WIDTH(x) ((x) << 28)
117#define ATC_DST_WIDTH_BYTE (0x0 << 28) 118#define ATC_DST_WIDTH_BYTE (0x0 << 28)
@@ -182,7 +183,6 @@ struct at_lli {
182 * @txd: support for the async_tx api 183 * @txd: support for the async_tx api
183 * @desc_node: node on the channed descriptors list 184 * @desc_node: node on the channed descriptors list
184 * @len: descriptor byte count 185 * @len: descriptor byte count
185 * @tx_width: transfer width
186 * @total_len: total transaction byte count 186 * @total_len: total transaction byte count
187 */ 187 */
188struct at_desc { 188struct at_desc {
@@ -194,7 +194,6 @@ struct at_desc {
194 struct dma_async_tx_descriptor txd; 194 struct dma_async_tx_descriptor txd;
195 struct list_head desc_node; 195 struct list_head desc_node;
196 size_t len; 196 size_t len;
197 u32 tx_width;
198 size_t total_len; 197 size_t total_len;
199 198
200 /* Interleaved data */ 199 /* Interleaved data */
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index cf1213de7865..40afa2a16cfc 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -359,18 +359,19 @@ static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan,
359 * descriptor view 2 since some fields of the configuration register 359 * descriptor view 2 since some fields of the configuration register
360 * depend on transfer size and src/dest addresses. 360 * depend on transfer size and src/dest addresses.
361 */ 361 */
362 if (at_xdmac_chan_is_cyclic(atchan)) { 362 if (at_xdmac_chan_is_cyclic(atchan))
363 reg = AT_XDMAC_CNDC_NDVIEW_NDV1; 363 reg = AT_XDMAC_CNDC_NDVIEW_NDV1;
364 at_xdmac_chan_write(atchan, AT_XDMAC_CC, first->lld.mbr_cfg); 364 else if (first->lld.mbr_ubc & AT_XDMAC_MBR_UBC_NDV3)
365 } else if (first->lld.mbr_ubc & AT_XDMAC_MBR_UBC_NDV3) {
366 reg = AT_XDMAC_CNDC_NDVIEW_NDV3; 365 reg = AT_XDMAC_CNDC_NDVIEW_NDV3;
367 } else { 366 else
368 /*
369 * No need to write AT_XDMAC_CC reg, it will be done when the
370 * descriptor is fecthed.
371 */
372 reg = AT_XDMAC_CNDC_NDVIEW_NDV2; 367 reg = AT_XDMAC_CNDC_NDVIEW_NDV2;
373 } 368 /*
369 * Even if the register will be updated from the configuration in the
370 * descriptor when using view 2 or higher, the PROT bit won't be set
371 * properly. This bit can be modified only by using the channel
372 * configuration register.
373 */
374 at_xdmac_chan_write(atchan, AT_XDMAC_CC, first->lld.mbr_cfg);
374 375
375 reg |= AT_XDMAC_CNDC_NDDUP 376 reg |= AT_XDMAC_CNDC_NDDUP
376 | AT_XDMAC_CNDC_NDSUP 377 | AT_XDMAC_CNDC_NDSUP
@@ -681,15 +682,16 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
681 desc->lld.mbr_sa = mem; 682 desc->lld.mbr_sa = mem;
682 desc->lld.mbr_da = atchan->sconfig.dst_addr; 683 desc->lld.mbr_da = atchan->sconfig.dst_addr;
683 } 684 }
684 desc->lld.mbr_cfg = atchan->cfg; 685 dwidth = at_xdmac_get_dwidth(atchan->cfg);
685 dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
686 fixed_dwidth = IS_ALIGNED(len, 1 << dwidth) 686 fixed_dwidth = IS_ALIGNED(len, 1 << dwidth)
687 ? at_xdmac_get_dwidth(desc->lld.mbr_cfg) 687 ? dwidth
688 : AT_XDMAC_CC_DWIDTH_BYTE; 688 : AT_XDMAC_CC_DWIDTH_BYTE;
689 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2 /* next descriptor view */ 689 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2 /* next descriptor view */
690 | AT_XDMAC_MBR_UBC_NDEN /* next descriptor dst parameter update */ 690 | AT_XDMAC_MBR_UBC_NDEN /* next descriptor dst parameter update */
691 | AT_XDMAC_MBR_UBC_NSEN /* next descriptor src parameter update */ 691 | AT_XDMAC_MBR_UBC_NSEN /* next descriptor src parameter update */
692 | (len >> fixed_dwidth); /* microblock length */ 692 | (len >> fixed_dwidth); /* microblock length */
693 desc->lld.mbr_cfg = (atchan->cfg & ~AT_XDMAC_CC_DWIDTH_MASK) |
694 AT_XDMAC_CC_DWIDTH(fixed_dwidth);
693 dev_dbg(chan2dev(chan), 695 dev_dbg(chan2dev(chan),
694 "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n", 696 "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
695 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc); 697 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 4a4cce15f25d..3ff284c8e3d5 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -689,6 +689,10 @@ struct dma_chan *dma_request_slave_channel(struct device *dev,
689 struct dma_chan *ch = dma_request_slave_channel_reason(dev, name); 689 struct dma_chan *ch = dma_request_slave_channel_reason(dev, name);
690 if (IS_ERR(ch)) 690 if (IS_ERR(ch))
691 return NULL; 691 return NULL;
692
693 dma_cap_set(DMA_PRIVATE, ch->device->cap_mask);
694 ch->device->privatecnt++;
695
692 return ch; 696 return ch;
693} 697}
694EXPORT_SYMBOL_GPL(dma_request_slave_channel); 698EXPORT_SYMBOL_GPL(dma_request_slave_channel);
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 1022c2e1a2b0..cf1c87fa1edd 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -1746,4 +1746,4 @@ EXPORT_SYMBOL_GPL(dw_dma_enable);
1746MODULE_LICENSE("GPL v2"); 1746MODULE_LICENSE("GPL v2");
1747MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller core driver"); 1747MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller core driver");
1748MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); 1748MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
1749MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>"); 1749MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index fbaf1ead2597..f1325f62563e 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -162,10 +162,11 @@ static void mv_chan_set_mode(struct mv_xor_chan *chan,
162 config &= ~0x7; 162 config &= ~0x7;
163 config |= op_mode; 163 config |= op_mode;
164 164
165 if (IS_ENABLED(__BIG_ENDIAN)) 165#if defined(__BIG_ENDIAN)
166 config |= XOR_DESCRIPTOR_SWAP; 166 config |= XOR_DESCRIPTOR_SWAP;
167 else 167#else
168 config &= ~XOR_DESCRIPTOR_SWAP; 168 config &= ~XOR_DESCRIPTOR_SWAP;
169#endif
169 170
170 writel_relaxed(config, XOR_CONFIG(chan)); 171 writel_relaxed(config, XOR_CONFIG(chan));
171 chan->current_type = type; 172 chan->current_type = type;
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index f513f77b1d85..ecab4ea059b4 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2328,7 +2328,7 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
2328 desc->txd.callback = last->txd.callback; 2328 desc->txd.callback = last->txd.callback;
2329 desc->txd.callback_param = last->txd.callback_param; 2329 desc->txd.callback_param = last->txd.callback_param;
2330 } 2330 }
2331 last->last = false; 2331 desc->last = false;
2332 2332
2333 dma_cookie_assign(&desc->txd); 2333 dma_cookie_assign(&desc->txd);
2334 2334
@@ -2623,6 +2623,7 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
2623 desc->rqcfg.brst_len = 1; 2623 desc->rqcfg.brst_len = 1;
2624 2624
2625 desc->rqcfg.brst_len = get_burst_len(desc, len); 2625 desc->rqcfg.brst_len = get_burst_len(desc, len);
2626 desc->bytes_requested = len;
2626 2627
2627 desc->txd.flags = flags; 2628 desc->txd.flags = flags;
2628 2629
diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c
index 7d2c17d8d30f..6f80432a3f0a 100644
--- a/drivers/dma/virt-dma.c
+++ b/drivers/dma/virt-dma.c
@@ -29,7 +29,7 @@ dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx)
29 spin_lock_irqsave(&vc->lock, flags); 29 spin_lock_irqsave(&vc->lock, flags);
30 cookie = dma_cookie_assign(tx); 30 cookie = dma_cookie_assign(tx);
31 31
32 list_move_tail(&vd->node, &vc->desc_submitted); 32 list_add_tail(&vd->node, &vc->desc_submitted);
33 spin_unlock_irqrestore(&vc->lock, flags); 33 spin_unlock_irqrestore(&vc->lock, flags);
34 34
35 dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n", 35 dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n",
@@ -83,10 +83,8 @@ static void vchan_complete(unsigned long arg)
83 cb_data = vd->tx.callback_param; 83 cb_data = vd->tx.callback_param;
84 84
85 list_del(&vd->node); 85 list_del(&vd->node);
86 if (async_tx_test_ack(&vd->tx)) 86
87 list_add(&vd->node, &vc->desc_allocated); 87 vc->desc_free(vd);
88 else
89 vc->desc_free(vd);
90 88
91 if (cb) 89 if (cb)
92 cb(cb_data); 90 cb(cb_data);
@@ -98,13 +96,9 @@ void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head)
98 while (!list_empty(head)) { 96 while (!list_empty(head)) {
99 struct virt_dma_desc *vd = list_first_entry(head, 97 struct virt_dma_desc *vd = list_first_entry(head,
100 struct virt_dma_desc, node); 98 struct virt_dma_desc, node);
101 if (async_tx_test_ack(&vd->tx)) { 99 list_del(&vd->node);
102 list_move_tail(&vd->node, &vc->desc_allocated); 100 dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
103 } else { 101 vc->desc_free(vd);
104 dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
105 list_del(&vd->node);
106 vc->desc_free(vd);
107 }
108 } 102 }
109} 103}
110EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list); 104EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list);
@@ -114,7 +108,6 @@ void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev)
114 dma_cookie_init(&vc->chan); 108 dma_cookie_init(&vc->chan);
115 109
116 spin_lock_init(&vc->lock); 110 spin_lock_init(&vc->lock);
117 INIT_LIST_HEAD(&vc->desc_allocated);
118 INIT_LIST_HEAD(&vc->desc_submitted); 111 INIT_LIST_HEAD(&vc->desc_submitted);
119 INIT_LIST_HEAD(&vc->desc_issued); 112 INIT_LIST_HEAD(&vc->desc_issued);
120 INIT_LIST_HEAD(&vc->desc_completed); 113 INIT_LIST_HEAD(&vc->desc_completed);
diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h
index 189e75dbcb15..181b95267866 100644
--- a/drivers/dma/virt-dma.h
+++ b/drivers/dma/virt-dma.h
@@ -29,7 +29,6 @@ struct virt_dma_chan {
29 spinlock_t lock; 29 spinlock_t lock;
30 30
31 /* protected by vc.lock */ 31 /* protected by vc.lock */
32 struct list_head desc_allocated;
33 struct list_head desc_submitted; 32 struct list_head desc_submitted;
34 struct list_head desc_issued; 33 struct list_head desc_issued;
35 struct list_head desc_completed; 34 struct list_head desc_completed;
@@ -56,16 +55,11 @@ static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan
56 struct virt_dma_desc *vd, unsigned long tx_flags) 55 struct virt_dma_desc *vd, unsigned long tx_flags)
57{ 56{
58 extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *); 57 extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *);
59 unsigned long flags;
60 58
61 dma_async_tx_descriptor_init(&vd->tx, &vc->chan); 59 dma_async_tx_descriptor_init(&vd->tx, &vc->chan);
62 vd->tx.flags = tx_flags; 60 vd->tx.flags = tx_flags;
63 vd->tx.tx_submit = vchan_tx_submit; 61 vd->tx.tx_submit = vchan_tx_submit;
64 62
65 spin_lock_irqsave(&vc->lock, flags);
66 list_add_tail(&vd->node, &vc->desc_allocated);
67 spin_unlock_irqrestore(&vc->lock, flags);
68
69 return &vd->tx; 63 return &vd->tx;
70} 64}
71 65
@@ -128,8 +122,7 @@ static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
128} 122}
129 123
130/** 124/**
131 * vchan_get_all_descriptors - obtain all allocated, submitted and issued 125 * vchan_get_all_descriptors - obtain all submitted and issued descriptors
132 * descriptors
133 * vc: virtual channel to get descriptors from 126 * vc: virtual channel to get descriptors from
134 * head: list of descriptors found 127 * head: list of descriptors found
135 * 128 *
@@ -141,7 +134,6 @@ static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
141static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc, 134static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
142 struct list_head *head) 135 struct list_head *head)
143{ 136{
144 list_splice_tail_init(&vc->desc_allocated, head);
145 list_splice_tail_init(&vc->desc_submitted, head); 137 list_splice_tail_init(&vc->desc_submitted, head);
146 list_splice_tail_init(&vc->desc_issued, head); 138 list_splice_tail_init(&vc->desc_issued, head);
147 list_splice_tail_init(&vc->desc_completed, head); 139 list_splice_tail_init(&vc->desc_completed, head);
@@ -149,14 +141,11 @@ static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
149 141
150static inline void vchan_free_chan_resources(struct virt_dma_chan *vc) 142static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
151{ 143{
152 struct virt_dma_desc *vd;
153 unsigned long flags; 144 unsigned long flags;
154 LIST_HEAD(head); 145 LIST_HEAD(head);
155 146
156 spin_lock_irqsave(&vc->lock, flags); 147 spin_lock_irqsave(&vc->lock, flags);
157 vchan_get_all_descriptors(vc, &head); 148 vchan_get_all_descriptors(vc, &head);
158 list_for_each_entry(vd, &head, node)
159 async_tx_clear_ack(&vd->tx);
160 spin_unlock_irqrestore(&vc->lock, flags); 149 spin_unlock_irqrestore(&vc->lock, flags);
161 150
162 vchan_dma_desc_free_list(vc, &head); 151 vchan_dma_desc_free_list(vc, &head);
diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c
index 620fd55ec766..dff22ab01851 100644
--- a/drivers/dma/xgene-dma.c
+++ b/drivers/dma/xgene-dma.c
@@ -111,6 +111,7 @@
111#define XGENE_DMA_MEM_RAM_SHUTDOWN 0xD070 111#define XGENE_DMA_MEM_RAM_SHUTDOWN 0xD070
112#define XGENE_DMA_BLK_MEM_RDY 0xD074 112#define XGENE_DMA_BLK_MEM_RDY 0xD074
113#define XGENE_DMA_BLK_MEM_RDY_VAL 0xFFFFFFFF 113#define XGENE_DMA_BLK_MEM_RDY_VAL 0xFFFFFFFF
114#define XGENE_DMA_RING_CMD_SM_OFFSET 0x8000
114 115
115/* X-Gene SoC EFUSE csr register and bit defination */ 116/* X-Gene SoC EFUSE csr register and bit defination */
116#define XGENE_SOC_JTAG1_SHADOW 0x18 117#define XGENE_SOC_JTAG1_SHADOW 0x18
@@ -1887,6 +1888,8 @@ static int xgene_dma_get_resources(struct platform_device *pdev,
1887 return -ENOMEM; 1888 return -ENOMEM;
1888 } 1889 }
1889 1890
1891 pdma->csr_ring_cmd += XGENE_DMA_RING_CMD_SM_OFFSET;
1892
1890 /* Get efuse csr region */ 1893 /* Get efuse csr region */
1891 res = platform_get_resource(pdev, IORESOURCE_MEM, 3); 1894 res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
1892 if (!res) { 1895 if (!res) {
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
index 3515b381c131..711d8ad74f11 100644
--- a/drivers/edac/ppc4xx_edac.c
+++ b/drivers/edac/ppc4xx_edac.c
@@ -920,7 +920,7 @@ static int ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
920 */ 920 */
921 921
922 for (row = 0; row < mci->nr_csrows; row++) { 922 for (row = 0; row < mci->nr_csrows; row++) {
923 struct csrow_info *csi = &mci->csrows[row]; 923 struct csrow_info *csi = mci->csrows[row];
924 924
925 /* 925 /*
926 * Get the configuration settings for this 926 * Get the configuration settings for this
diff --git a/drivers/extcon/extcon-max77693.c b/drivers/extcon/extcon-max77693.c
index f4f3b3d53928..35b9e118b2fb 100644
--- a/drivers/extcon/extcon-max77693.c
+++ b/drivers/extcon/extcon-max77693.c
@@ -24,6 +24,7 @@
24#include <linux/err.h> 24#include <linux/err.h>
25#include <linux/platform_device.h> 25#include <linux/platform_device.h>
26#include <linux/mfd/max77693.h> 26#include <linux/mfd/max77693.h>
27#include <linux/mfd/max77693-common.h>
27#include <linux/mfd/max77693-private.h> 28#include <linux/mfd/max77693-private.h>
28#include <linux/extcon.h> 29#include <linux/extcon.h>
29#include <linux/regmap.h> 30#include <linux/regmap.h>
@@ -42,7 +43,7 @@ static struct max77693_reg_data default_init_data[] = {
42 { 43 {
43 /* STATUS2 - [3]ChgDetRun */ 44 /* STATUS2 - [3]ChgDetRun */
44 .addr = MAX77693_MUIC_REG_STATUS2, 45 .addr = MAX77693_MUIC_REG_STATUS2,
45 .data = STATUS2_CHGDETRUN_MASK, 46 .data = MAX77693_STATUS2_CHGDETRUN_MASK,
46 }, { 47 }, {
47 /* INTMASK1 - Unmask [3]ADC1KM,[0]ADCM */ 48 /* INTMASK1 - Unmask [3]ADC1KM,[0]ADCM */
48 .addr = MAX77693_MUIC_REG_INTMASK1, 49 .addr = MAX77693_MUIC_REG_INTMASK1,
@@ -235,7 +236,7 @@ static int max77693_muic_set_debounce_time(struct max77693_muic_info *info,
235 */ 236 */
236 ret = regmap_write(info->max77693->regmap_muic, 237 ret = regmap_write(info->max77693->regmap_muic,
237 MAX77693_MUIC_REG_CTRL3, 238 MAX77693_MUIC_REG_CTRL3,
238 time << CONTROL3_ADCDBSET_SHIFT); 239 time << MAX77693_CONTROL3_ADCDBSET_SHIFT);
239 if (ret) { 240 if (ret) {
240 dev_err(info->dev, "failed to set ADC debounce time\n"); 241 dev_err(info->dev, "failed to set ADC debounce time\n");
241 return ret; 242 return ret;
@@ -268,7 +269,7 @@ static int max77693_muic_set_path(struct max77693_muic_info *info,
268 if (attached) 269 if (attached)
269 ctrl1 = val; 270 ctrl1 = val;
270 else 271 else
271 ctrl1 = CONTROL1_SW_OPEN; 272 ctrl1 = MAX77693_CONTROL1_SW_OPEN;
272 273
273 ret = regmap_update_bits(info->max77693->regmap_muic, 274 ret = regmap_update_bits(info->max77693->regmap_muic,
274 MAX77693_MUIC_REG_CTRL1, COMP_SW_MASK, ctrl1); 275 MAX77693_MUIC_REG_CTRL1, COMP_SW_MASK, ctrl1);
@@ -278,13 +279,14 @@ static int max77693_muic_set_path(struct max77693_muic_info *info,
278 } 279 }
279 280
280 if (attached) 281 if (attached)
281 ctrl2 |= CONTROL2_CPEN_MASK; /* LowPwr=0, CPEn=1 */ 282 ctrl2 |= MAX77693_CONTROL2_CPEN_MASK; /* LowPwr=0, CPEn=1 */
282 else 283 else
283 ctrl2 |= CONTROL2_LOWPWR_MASK; /* LowPwr=1, CPEn=0 */ 284 ctrl2 |= MAX77693_CONTROL2_LOWPWR_MASK; /* LowPwr=1, CPEn=0 */
284 285
285 ret = regmap_update_bits(info->max77693->regmap_muic, 286 ret = regmap_update_bits(info->max77693->regmap_muic,
286 MAX77693_MUIC_REG_CTRL2, 287 MAX77693_MUIC_REG_CTRL2,
287 CONTROL2_LOWPWR_MASK | CONTROL2_CPEN_MASK, ctrl2); 288 MAX77693_CONTROL2_LOWPWR_MASK | MAX77693_CONTROL2_CPEN_MASK,
289 ctrl2);
288 if (ret < 0) { 290 if (ret < 0) {
289 dev_err(info->dev, "failed to update MUIC register\n"); 291 dev_err(info->dev, "failed to update MUIC register\n");
290 return ret; 292 return ret;
@@ -326,8 +328,8 @@ static int max77693_muic_get_cable_type(struct max77693_muic_info *info,
326 * Read ADC value to check cable type and decide cable state 328 * Read ADC value to check cable type and decide cable state
327 * according to cable type 329 * according to cable type
328 */ 330 */
329 adc = info->status[0] & STATUS1_ADC_MASK; 331 adc = info->status[0] & MAX77693_STATUS1_ADC_MASK;
330 adc >>= STATUS1_ADC_SHIFT; 332 adc >>= MAX77693_STATUS1_ADC_SHIFT;
331 333
332 /* 334 /*
333 * Check current cable state/cable type and store cable type 335 * Check current cable state/cable type and store cable type
@@ -350,8 +352,8 @@ static int max77693_muic_get_cable_type(struct max77693_muic_info *info,
350 * Read ADC value to check cable type and decide cable state 352 * Read ADC value to check cable type and decide cable state
351 * according to cable type 353 * according to cable type
352 */ 354 */
353 adc = info->status[0] & STATUS1_ADC_MASK; 355 adc = info->status[0] & MAX77693_STATUS1_ADC_MASK;
354 adc >>= STATUS1_ADC_SHIFT; 356 adc >>= MAX77693_STATUS1_ADC_SHIFT;
355 357
356 /* 358 /*
357 * Check current cable state/cable type and store cable type 359 * Check current cable state/cable type and store cable type
@@ -366,13 +368,13 @@ static int max77693_muic_get_cable_type(struct max77693_muic_info *info,
366 } else { 368 } else {
367 *attached = true; 369 *attached = true;
368 370
369 adclow = info->status[0] & STATUS1_ADCLOW_MASK; 371 adclow = info->status[0] & MAX77693_STATUS1_ADCLOW_MASK;
370 adclow >>= STATUS1_ADCLOW_SHIFT; 372 adclow >>= MAX77693_STATUS1_ADCLOW_SHIFT;
371 adc1k = info->status[0] & STATUS1_ADC1K_MASK; 373 adc1k = info->status[0] & MAX77693_STATUS1_ADC1K_MASK;
372 adc1k >>= STATUS1_ADC1K_SHIFT; 374 adc1k >>= MAX77693_STATUS1_ADC1K_SHIFT;
373 375
374 vbvolt = info->status[1] & STATUS2_VBVOLT_MASK; 376 vbvolt = info->status[1] & MAX77693_STATUS2_VBVOLT_MASK;
375 vbvolt >>= STATUS2_VBVOLT_SHIFT; 377 vbvolt >>= MAX77693_STATUS2_VBVOLT_SHIFT;
376 378
377 /** 379 /**
378 * [0x1|VBVolt|ADCLow|ADC1K] 380 * [0x1|VBVolt|ADCLow|ADC1K]
@@ -397,8 +399,8 @@ static int max77693_muic_get_cable_type(struct max77693_muic_info *info,
397 * Read charger type to check cable type and decide cable state 399 * Read charger type to check cable type and decide cable state
398 * according to type of charger cable. 400 * according to type of charger cable.
399 */ 401 */
400 chg_type = info->status[1] & STATUS2_CHGTYP_MASK; 402 chg_type = info->status[1] & MAX77693_STATUS2_CHGTYP_MASK;
401 chg_type >>= STATUS2_CHGTYP_SHIFT; 403 chg_type >>= MAX77693_STATUS2_CHGTYP_SHIFT;
402 404
403 if (chg_type == MAX77693_CHARGER_TYPE_NONE) { 405 if (chg_type == MAX77693_CHARGER_TYPE_NONE) {
404 *attached = false; 406 *attached = false;
@@ -422,10 +424,10 @@ static int max77693_muic_get_cable_type(struct max77693_muic_info *info,
422 * Read ADC value to check cable type and decide cable state 424 * Read ADC value to check cable type and decide cable state
423 * according to cable type 425 * according to cable type
424 */ 426 */
425 adc = info->status[0] & STATUS1_ADC_MASK; 427 adc = info->status[0] & MAX77693_STATUS1_ADC_MASK;
426 adc >>= STATUS1_ADC_SHIFT; 428 adc >>= MAX77693_STATUS1_ADC_SHIFT;
427 chg_type = info->status[1] & STATUS2_CHGTYP_MASK; 429 chg_type = info->status[1] & MAX77693_STATUS2_CHGTYP_MASK;
428 chg_type >>= STATUS2_CHGTYP_SHIFT; 430 chg_type >>= MAX77693_STATUS2_CHGTYP_SHIFT;
429 431
430 if (adc == MAX77693_MUIC_ADC_OPEN 432 if (adc == MAX77693_MUIC_ADC_OPEN
431 && chg_type == MAX77693_CHARGER_TYPE_NONE) 433 && chg_type == MAX77693_CHARGER_TYPE_NONE)
@@ -437,8 +439,8 @@ static int max77693_muic_get_cable_type(struct max77693_muic_info *info,
437 * Read vbvolt field, if vbvolt is 1, 439 * Read vbvolt field, if vbvolt is 1,
438 * this cable is used for charging. 440 * this cable is used for charging.
439 */ 441 */
440 vbvolt = info->status[1] & STATUS2_VBVOLT_MASK; 442 vbvolt = info->status[1] & MAX77693_STATUS2_VBVOLT_MASK;
441 vbvolt >>= STATUS2_VBVOLT_SHIFT; 443 vbvolt >>= MAX77693_STATUS2_VBVOLT_SHIFT;
442 444
443 cable_type = vbvolt; 445 cable_type = vbvolt;
444 break; 446 break;
@@ -520,7 +522,8 @@ static int max77693_muic_dock_handler(struct max77693_muic_info *info,
520 } 522 }
521 523
522 /* Dock-Car/Desk/Audio, PATH:AUDIO */ 524 /* Dock-Car/Desk/Audio, PATH:AUDIO */
523 ret = max77693_muic_set_path(info, CONTROL1_SW_AUDIO, attached); 525 ret = max77693_muic_set_path(info, MAX77693_CONTROL1_SW_AUDIO,
526 attached);
524 if (ret < 0) 527 if (ret < 0)
525 return ret; 528 return ret;
526 extcon_set_cable_state_(info->edev, dock_id, attached); 529 extcon_set_cable_state_(info->edev, dock_id, attached);
@@ -585,14 +588,16 @@ static int max77693_muic_adc_ground_handler(struct max77693_muic_info *info)
585 case MAX77693_MUIC_GND_USB_HOST: 588 case MAX77693_MUIC_GND_USB_HOST:
586 case MAX77693_MUIC_GND_USB_HOST_VB: 589 case MAX77693_MUIC_GND_USB_HOST_VB:
587 /* USB_HOST, PATH: AP_USB */ 590 /* USB_HOST, PATH: AP_USB */
588 ret = max77693_muic_set_path(info, CONTROL1_SW_USB, attached); 591 ret = max77693_muic_set_path(info, MAX77693_CONTROL1_SW_USB,
592 attached);
589 if (ret < 0) 593 if (ret < 0)
590 return ret; 594 return ret;
591 extcon_set_cable_state_(info->edev, EXTCON_USB_HOST, attached); 595 extcon_set_cable_state_(info->edev, EXTCON_USB_HOST, attached);
592 break; 596 break;
593 case MAX77693_MUIC_GND_AV_CABLE_LOAD: 597 case MAX77693_MUIC_GND_AV_CABLE_LOAD:
594 /* Audio Video Cable with load, PATH:AUDIO */ 598 /* Audio Video Cable with load, PATH:AUDIO */
595 ret = max77693_muic_set_path(info, CONTROL1_SW_AUDIO, attached); 599 ret = max77693_muic_set_path(info, MAX77693_CONTROL1_SW_AUDIO,
600 attached);
596 if (ret < 0) 601 if (ret < 0)
597 return ret; 602 return ret;
598 extcon_set_cable_state_(info->edev, EXTCON_USB, attached); 603 extcon_set_cable_state_(info->edev, EXTCON_USB, attached);
@@ -615,7 +620,7 @@ static int max77693_muic_jig_handler(struct max77693_muic_info *info,
615 int cable_type, bool attached) 620 int cable_type, bool attached)
616{ 621{
617 int ret = 0; 622 int ret = 0;
618 u8 path = CONTROL1_SW_OPEN; 623 u8 path = MAX77693_CONTROL1_SW_OPEN;
619 624
620 dev_info(info->dev, 625 dev_info(info->dev,
621 "external connector is %s (adc:0x%02x)\n", 626 "external connector is %s (adc:0x%02x)\n",
@@ -625,12 +630,12 @@ static int max77693_muic_jig_handler(struct max77693_muic_info *info,
625 case MAX77693_MUIC_ADC_FACTORY_MODE_USB_OFF: /* ADC_JIG_USB_OFF */ 630 case MAX77693_MUIC_ADC_FACTORY_MODE_USB_OFF: /* ADC_JIG_USB_OFF */
626 case MAX77693_MUIC_ADC_FACTORY_MODE_USB_ON: /* ADC_JIG_USB_ON */ 631 case MAX77693_MUIC_ADC_FACTORY_MODE_USB_ON: /* ADC_JIG_USB_ON */
627 /* PATH:AP_USB */ 632 /* PATH:AP_USB */
628 path = CONTROL1_SW_USB; 633 path = MAX77693_CONTROL1_SW_USB;
629 break; 634 break;
630 case MAX77693_MUIC_ADC_FACTORY_MODE_UART_OFF: /* ADC_JIG_UART_OFF */ 635 case MAX77693_MUIC_ADC_FACTORY_MODE_UART_OFF: /* ADC_JIG_UART_OFF */
631 case MAX77693_MUIC_ADC_FACTORY_MODE_UART_ON: /* ADC_JIG_UART_ON */ 636 case MAX77693_MUIC_ADC_FACTORY_MODE_UART_ON: /* ADC_JIG_UART_ON */
632 /* PATH:AP_UART */ 637 /* PATH:AP_UART */
633 path = CONTROL1_SW_UART; 638 path = MAX77693_CONTROL1_SW_UART;
634 break; 639 break;
635 default: 640 default:
636 dev_err(info->dev, "failed to detect %s jig cable\n", 641 dev_err(info->dev, "failed to detect %s jig cable\n",
@@ -1077,7 +1082,7 @@ static int max77693_muic_probe(struct platform_device *pdev)
1077 dev_dbg(&pdev->dev, "allocate register map\n"); 1082 dev_dbg(&pdev->dev, "allocate register map\n");
1078 } else { 1083 } else {
1079 info->max77693->regmap_muic = devm_regmap_init_i2c( 1084 info->max77693->regmap_muic = devm_regmap_init_i2c(
1080 info->max77693->muic, 1085 info->max77693->i2c_muic,
1081 &max77693_muic_regmap_config); 1086 &max77693_muic_regmap_config);
1082 if (IS_ERR(info->max77693->regmap_muic)) { 1087 if (IS_ERR(info->max77693->regmap_muic)) {
1083 ret = PTR_ERR(info->max77693->regmap_muic); 1088 ret = PTR_ERR(info->max77693->regmap_muic);
@@ -1164,28 +1169,9 @@ static int max77693_muic_probe(struct platform_device *pdev)
1164 } 1169 }
1165 1170
1166 for (i = 0; i < num_init_data; i++) { 1171 for (i = 0; i < num_init_data; i++) {
1167 enum max77693_irq_source irq_src
1168 = MAX77693_IRQ_GROUP_NR;
1169
1170 regmap_write(info->max77693->regmap_muic, 1172 regmap_write(info->max77693->regmap_muic,
1171 init_data[i].addr, 1173 init_data[i].addr,
1172 init_data[i].data); 1174 init_data[i].data);
1173
1174 switch (init_data[i].addr) {
1175 case MAX77693_MUIC_REG_INTMASK1:
1176 irq_src = MUIC_INT1;
1177 break;
1178 case MAX77693_MUIC_REG_INTMASK2:
1179 irq_src = MUIC_INT2;
1180 break;
1181 case MAX77693_MUIC_REG_INTMASK3:
1182 irq_src = MUIC_INT3;
1183 break;
1184 }
1185
1186 if (irq_src < MAX77693_IRQ_GROUP_NR)
1187 info->max77693->irq_masks_cur[irq_src]
1188 = init_data[i].data;
1189 } 1175 }
1190 1176
1191 if (pdata && pdata->muic_data) { 1177 if (pdata && pdata->muic_data) {
@@ -1199,12 +1185,12 @@ static int max77693_muic_probe(struct platform_device *pdev)
1199 if (muic_pdata->path_uart) 1185 if (muic_pdata->path_uart)
1200 info->path_uart = muic_pdata->path_uart; 1186 info->path_uart = muic_pdata->path_uart;
1201 else 1187 else
1202 info->path_uart = CONTROL1_SW_UART; 1188 info->path_uart = MAX77693_CONTROL1_SW_UART;
1203 1189
1204 if (muic_pdata->path_usb) 1190 if (muic_pdata->path_usb)
1205 info->path_usb = muic_pdata->path_usb; 1191 info->path_usb = muic_pdata->path_usb;
1206 else 1192 else
1207 info->path_usb = CONTROL1_SW_USB; 1193 info->path_usb = MAX77693_CONTROL1_SW_USB;
1208 1194
1209 /* 1195 /*
1210 * Default delay time for detecting cable state 1196 * Default delay time for detecting cable state
@@ -1216,8 +1202,8 @@ static int max77693_muic_probe(struct platform_device *pdev)
1216 else 1202 else
1217 delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT); 1203 delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT);
1218 } else { 1204 } else {
1219 info->path_usb = CONTROL1_SW_USB; 1205 info->path_usb = MAX77693_CONTROL1_SW_USB;
1220 info->path_uart = CONTROL1_SW_UART; 1206 info->path_uart = MAX77693_CONTROL1_SW_UART;
1221 delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT); 1207 delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT);
1222 } 1208 }
1223 1209
diff --git a/drivers/extcon/extcon-max77843.c b/drivers/extcon/extcon-max77843.c
index fac2f1417a79..f652c4199870 100644
--- a/drivers/extcon/extcon-max77843.c
+++ b/drivers/extcon/extcon-max77843.c
@@ -15,6 +15,7 @@
15#include <linux/i2c.h> 15#include <linux/i2c.h>
16#include <linux/interrupt.h> 16#include <linux/interrupt.h>
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/mfd/max77693-common.h>
18#include <linux/mfd/max77843-private.h> 19#include <linux/mfd/max77843-private.h>
19#include <linux/module.h> 20#include <linux/module.h>
20#include <linux/platform_device.h> 21#include <linux/platform_device.h>
@@ -32,7 +33,7 @@ enum max77843_muic_status {
32 33
33struct max77843_muic_info { 34struct max77843_muic_info {
34 struct device *dev; 35 struct device *dev;
35 struct max77843 *max77843; 36 struct max77693_dev *max77843;
36 struct extcon_dev *edev; 37 struct extcon_dev *edev;
37 38
38 struct mutex mutex; 39 struct mutex mutex;
@@ -198,18 +199,18 @@ static const struct regmap_irq_chip max77843_muic_irq_chip = {
198static int max77843_muic_set_path(struct max77843_muic_info *info, 199static int max77843_muic_set_path(struct max77843_muic_info *info,
199 u8 val, bool attached) 200 u8 val, bool attached)
200{ 201{
201 struct max77843 *max77843 = info->max77843; 202 struct max77693_dev *max77843 = info->max77843;
202 int ret = 0; 203 int ret = 0;
203 unsigned int ctrl1, ctrl2; 204 unsigned int ctrl1, ctrl2;
204 205
205 if (attached) 206 if (attached)
206 ctrl1 = val; 207 ctrl1 = val;
207 else 208 else
208 ctrl1 = CONTROL1_SW_OPEN; 209 ctrl1 = MAX77843_MUIC_CONTROL1_SW_OPEN;
209 210
210 ret = regmap_update_bits(max77843->regmap_muic, 211 ret = regmap_update_bits(max77843->regmap_muic,
211 MAX77843_MUIC_REG_CONTROL1, 212 MAX77843_MUIC_REG_CONTROL1,
212 CONTROL1_COM_SW, ctrl1); 213 MAX77843_MUIC_CONTROL1_COM_SW, ctrl1);
213 if (ret < 0) { 214 if (ret < 0) {
214 dev_err(info->dev, "Cannot switch MUIC port\n"); 215 dev_err(info->dev, "Cannot switch MUIC port\n");
215 return ret; 216 return ret;
@@ -243,7 +244,7 @@ static int max77843_muic_get_cable_type(struct max77843_muic_info *info,
243 244
244 adc = info->status[MAX77843_MUIC_STATUS1] & 245 adc = info->status[MAX77843_MUIC_STATUS1] &
245 MAX77843_MUIC_STATUS1_ADC_MASK; 246 MAX77843_MUIC_STATUS1_ADC_MASK;
246 adc >>= STATUS1_ADC_SHIFT; 247 adc >>= MAX77843_MUIC_STATUS1_ADC_SHIFT;
247 248
248 switch (group) { 249 switch (group) {
249 case MAX77843_CABLE_GROUP_ADC: 250 case MAX77843_CABLE_GROUP_ADC:
@@ -309,7 +310,7 @@ static int max77843_muic_get_cable_type(struct max77843_muic_info *info,
309 /* Get VBVolt register bit */ 310 /* Get VBVolt register bit */
310 gnd_type |= (info->status[MAX77843_MUIC_STATUS2] & 311 gnd_type |= (info->status[MAX77843_MUIC_STATUS2] &
311 MAX77843_MUIC_STATUS2_VBVOLT_MASK); 312 MAX77843_MUIC_STATUS2_VBVOLT_MASK);
312 gnd_type >>= STATUS2_VBVOLT_SHIFT; 313 gnd_type >>= MAX77843_MUIC_STATUS2_VBVOLT_SHIFT;
313 314
314 /* Offset of GND cable */ 315 /* Offset of GND cable */
315 gnd_type |= MAX77843_MUIC_GND_USB_HOST; 316 gnd_type |= MAX77843_MUIC_GND_USB_HOST;
@@ -338,7 +339,9 @@ static int max77843_muic_adc_gnd_handler(struct max77843_muic_info *info)
338 switch (gnd_cable_type) { 339 switch (gnd_cable_type) {
339 case MAX77843_MUIC_GND_USB_HOST: 340 case MAX77843_MUIC_GND_USB_HOST:
340 case MAX77843_MUIC_GND_USB_HOST_VB: 341 case MAX77843_MUIC_GND_USB_HOST_VB:
341 ret = max77843_muic_set_path(info, CONTROL1_SW_USB, attached); 342 ret = max77843_muic_set_path(info,
343 MAX77843_MUIC_CONTROL1_SW_USB,
344 attached);
342 if (ret < 0) 345 if (ret < 0)
343 return ret; 346 return ret;
344 347
@@ -346,7 +349,9 @@ static int max77843_muic_adc_gnd_handler(struct max77843_muic_info *info)
346 break; 349 break;
347 case MAX77843_MUIC_GND_MHL_VB: 350 case MAX77843_MUIC_GND_MHL_VB:
348 case MAX77843_MUIC_GND_MHL: 351 case MAX77843_MUIC_GND_MHL:
349 ret = max77843_muic_set_path(info, CONTROL1_SW_OPEN, attached); 352 ret = max77843_muic_set_path(info,
353 MAX77843_MUIC_CONTROL1_SW_OPEN,
354 attached);
350 if (ret < 0) 355 if (ret < 0)
351 return ret; 356 return ret;
352 357
@@ -365,7 +370,7 @@ static int max77843_muic_jig_handler(struct max77843_muic_info *info,
365 int cable_type, bool attached) 370 int cable_type, bool attached)
366{ 371{
367 int ret; 372 int ret;
368 u8 path = CONTROL1_SW_OPEN; 373 u8 path = MAX77843_MUIC_CONTROL1_SW_OPEN;
369 374
370 dev_dbg(info->dev, "external connector is %s (adc:0x%02x)\n", 375 dev_dbg(info->dev, "external connector is %s (adc:0x%02x)\n",
371 attached ? "attached" : "detached", cable_type); 376 attached ? "attached" : "detached", cable_type);
@@ -373,10 +378,10 @@ static int max77843_muic_jig_handler(struct max77843_muic_info *info,
373 switch (cable_type) { 378 switch (cable_type) {
374 case MAX77843_MUIC_ADC_FACTORY_MODE_USB_OFF: 379 case MAX77843_MUIC_ADC_FACTORY_MODE_USB_OFF:
375 case MAX77843_MUIC_ADC_FACTORY_MODE_USB_ON: 380 case MAX77843_MUIC_ADC_FACTORY_MODE_USB_ON:
376 path = CONTROL1_SW_USB; 381 path = MAX77843_MUIC_CONTROL1_SW_USB;
377 break; 382 break;
378 case MAX77843_MUIC_ADC_FACTORY_MODE_UART_OFF: 383 case MAX77843_MUIC_ADC_FACTORY_MODE_UART_OFF:
379 path = CONTROL1_SW_UART; 384 path = MAX77843_MUIC_CONTROL1_SW_UART;
380 break; 385 break;
381 default: 386 default:
382 return -EINVAL; 387 return -EINVAL;
@@ -474,14 +479,18 @@ static int max77843_muic_chg_handler(struct max77843_muic_info *info)
474 479
475 switch (chg_type) { 480 switch (chg_type) {
476 case MAX77843_MUIC_CHG_USB: 481 case MAX77843_MUIC_CHG_USB:
477 ret = max77843_muic_set_path(info, CONTROL1_SW_USB, attached); 482 ret = max77843_muic_set_path(info,
483 MAX77843_MUIC_CONTROL1_SW_USB,
484 attached);
478 if (ret < 0) 485 if (ret < 0)
479 return ret; 486 return ret;
480 487
481 extcon_set_cable_state_(info->edev, EXTCON_USB, attached); 488 extcon_set_cable_state_(info->edev, EXTCON_USB, attached);
482 break; 489 break;
483 case MAX77843_MUIC_CHG_DOWNSTREAM: 490 case MAX77843_MUIC_CHG_DOWNSTREAM:
484 ret = max77843_muic_set_path(info, CONTROL1_SW_OPEN, attached); 491 ret = max77843_muic_set_path(info,
492 MAX77843_MUIC_CONTROL1_SW_OPEN,
493 attached);
485 if (ret < 0) 494 if (ret < 0)
486 return ret; 495 return ret;
487 496
@@ -489,14 +498,18 @@ static int max77843_muic_chg_handler(struct max77843_muic_info *info)
489 attached); 498 attached);
490 break; 499 break;
491 case MAX77843_MUIC_CHG_DEDICATED: 500 case MAX77843_MUIC_CHG_DEDICATED:
492 ret = max77843_muic_set_path(info, CONTROL1_SW_OPEN, attached); 501 ret = max77843_muic_set_path(info,
502 MAX77843_MUIC_CONTROL1_SW_OPEN,
503 attached);
493 if (ret < 0) 504 if (ret < 0)
494 return ret; 505 return ret;
495 506
496 extcon_set_cable_state_(info->edev, EXTCON_TA, attached); 507 extcon_set_cable_state_(info->edev, EXTCON_TA, attached);
497 break; 508 break;
498 case MAX77843_MUIC_CHG_SPECIAL_500MA: 509 case MAX77843_MUIC_CHG_SPECIAL_500MA:
499 ret = max77843_muic_set_path(info, CONTROL1_SW_OPEN, attached); 510 ret = max77843_muic_set_path(info,
511 MAX77843_MUIC_CONTROL1_SW_OPEN,
512 attached);
500 if (ret < 0) 513 if (ret < 0)
501 return ret; 514 return ret;
502 515
@@ -504,7 +517,9 @@ static int max77843_muic_chg_handler(struct max77843_muic_info *info)
504 attached); 517 attached);
505 break; 518 break;
506 case MAX77843_MUIC_CHG_SPECIAL_1A: 519 case MAX77843_MUIC_CHG_SPECIAL_1A:
507 ret = max77843_muic_set_path(info, CONTROL1_SW_OPEN, attached); 520 ret = max77843_muic_set_path(info,
521 MAX77843_MUIC_CONTROL1_SW_OPEN,
522 attached);
508 if (ret < 0) 523 if (ret < 0)
509 return ret; 524 return ret;
510 525
@@ -528,7 +543,8 @@ static int max77843_muic_chg_handler(struct max77843_muic_info *info)
528 "failed to detect %s accessory (chg_type:0x%x)\n", 543 "failed to detect %s accessory (chg_type:0x%x)\n",
529 attached ? "attached" : "detached", chg_type); 544 attached ? "attached" : "detached", chg_type);
530 545
531 max77843_muic_set_path(info, CONTROL1_SW_OPEN, attached); 546 max77843_muic_set_path(info, MAX77843_MUIC_CONTROL1_SW_OPEN,
547 attached);
532 return -EINVAL; 548 return -EINVAL;
533 } 549 }
534 550
@@ -539,7 +555,7 @@ static void max77843_muic_irq_work(struct work_struct *work)
539{ 555{
540 struct max77843_muic_info *info = container_of(work, 556 struct max77843_muic_info *info = container_of(work,
541 struct max77843_muic_info, irq_work); 557 struct max77843_muic_info, irq_work);
542 struct max77843 *max77843 = info->max77843; 558 struct max77693_dev *max77843 = info->max77843;
543 int ret = 0; 559 int ret = 0;
544 560
545 mutex_lock(&info->mutex); 561 mutex_lock(&info->mutex);
@@ -615,7 +631,7 @@ static void max77843_muic_detect_cable_wq(struct work_struct *work)
615{ 631{
616 struct max77843_muic_info *info = container_of(to_delayed_work(work), 632 struct max77843_muic_info *info = container_of(to_delayed_work(work),
617 struct max77843_muic_info, wq_detcable); 633 struct max77843_muic_info, wq_detcable);
618 struct max77843 *max77843 = info->max77843; 634 struct max77693_dev *max77843 = info->max77843;
619 int chg_type, adc, ret; 635 int chg_type, adc, ret;
620 bool attached; 636 bool attached;
621 637
@@ -656,7 +672,7 @@ err_cable_wq:
656static int max77843_muic_set_debounce_time(struct max77843_muic_info *info, 672static int max77843_muic_set_debounce_time(struct max77843_muic_info *info,
657 enum max77843_muic_adc_debounce_time time) 673 enum max77843_muic_adc_debounce_time time)
658{ 674{
659 struct max77843 *max77843 = info->max77843; 675 struct max77693_dev *max77843 = info->max77843;
660 int ret; 676 int ret;
661 677
662 switch (time) { 678 switch (time) {
@@ -667,7 +683,7 @@ static int max77843_muic_set_debounce_time(struct max77843_muic_info *info,
667 ret = regmap_update_bits(max77843->regmap_muic, 683 ret = regmap_update_bits(max77843->regmap_muic,
668 MAX77843_MUIC_REG_CONTROL4, 684 MAX77843_MUIC_REG_CONTROL4,
669 MAX77843_MUIC_CONTROL4_ADCDBSET_MASK, 685 MAX77843_MUIC_CONTROL4_ADCDBSET_MASK,
670 time << CONTROL4_ADCDBSET_SHIFT); 686 time << MAX77843_MUIC_CONTROL4_ADCDBSET_SHIFT);
671 if (ret < 0) { 687 if (ret < 0) {
672 dev_err(info->dev, "Cannot write MUIC regmap\n"); 688 dev_err(info->dev, "Cannot write MUIC regmap\n");
673 return ret; 689 return ret;
@@ -681,7 +697,7 @@ static int max77843_muic_set_debounce_time(struct max77843_muic_info *info,
681 return 0; 697 return 0;
682} 698}
683 699
684static int max77843_init_muic_regmap(struct max77843 *max77843) 700static int max77843_init_muic_regmap(struct max77693_dev *max77843)
685{ 701{
686 int ret; 702 int ret;
687 703
@@ -720,7 +736,7 @@ err_muic_i2c:
720 736
721static int max77843_muic_probe(struct platform_device *pdev) 737static int max77843_muic_probe(struct platform_device *pdev)
722{ 738{
723 struct max77843 *max77843 = dev_get_drvdata(pdev->dev.parent); 739 struct max77693_dev *max77843 = dev_get_drvdata(pdev->dev.parent);
724 struct max77843_muic_info *info; 740 struct max77843_muic_info *info;
725 unsigned int id; 741 unsigned int id;
726 int i, ret; 742 int i, ret;
@@ -768,7 +784,7 @@ static int max77843_muic_probe(struct platform_device *pdev)
768 max77843_muic_set_debounce_time(info, MAX77843_DEBOUNCE_TIME_25MS); 784 max77843_muic_set_debounce_time(info, MAX77843_DEBOUNCE_TIME_25MS);
769 785
770 /* Set initial path for UART */ 786 /* Set initial path for UART */
771 max77843_muic_set_path(info, CONTROL1_SW_UART, true); 787 max77843_muic_set_path(info, MAX77843_MUIC_CONTROL1_SW_UART, true);
772 788
773 /* Check revision number of MUIC device */ 789 /* Check revision number of MUIC device */
774 ret = regmap_read(max77843->regmap_muic, MAX77843_MUIC_REG_ID, &id); 790 ret = regmap_read(max77843->regmap_muic, MAX77843_MUIC_REG_ID, &id);
@@ -821,7 +837,7 @@ err_muic_irq:
821static int max77843_muic_remove(struct platform_device *pdev) 837static int max77843_muic_remove(struct platform_device *pdev)
822{ 838{
823 struct max77843_muic_info *info = platform_get_drvdata(pdev); 839 struct max77843_muic_info *info = platform_get_drvdata(pdev);
824 struct max77843 *max77843 = info->max77843; 840 struct max77693_dev *max77843 = info->max77843;
825 841
826 cancel_work_sync(&info->irq_work); 842 cancel_work_sync(&info->irq_work);
827 regmap_del_irq_chip(max77843->irq, max77843->irq_data_muic); 843 regmap_del_irq_chip(max77843->irq, max77843->irq_data_muic);
diff --git a/drivers/extcon/extcon-palmas.c b/drivers/extcon/extcon-palmas.c
index 080d5cc27055..eebdf2a33bfe 100644
--- a/drivers/extcon/extcon-palmas.c
+++ b/drivers/extcon/extcon-palmas.c
@@ -200,7 +200,6 @@ static int palmas_usb_probe(struct platform_device *pdev)
200 status = devm_extcon_dev_register(&pdev->dev, palmas_usb->edev); 200 status = devm_extcon_dev_register(&pdev->dev, palmas_usb->edev);
201 if (status) { 201 if (status) {
202 dev_err(&pdev->dev, "failed to register extcon device\n"); 202 dev_err(&pdev->dev, "failed to register extcon device\n");
203 kfree(palmas_usb->edev->name);
204 return status; 203 return status;
205 } 204 }
206 205
@@ -214,7 +213,6 @@ static int palmas_usb_probe(struct platform_device *pdev)
214 if (status < 0) { 213 if (status < 0) {
215 dev_err(&pdev->dev, "can't get IRQ %d, err %d\n", 214 dev_err(&pdev->dev, "can't get IRQ %d, err %d\n",
216 palmas_usb->id_irq, status); 215 palmas_usb->id_irq, status);
217 kfree(palmas_usb->edev->name);
218 return status; 216 return status;
219 } 217 }
220 } 218 }
@@ -229,7 +227,6 @@ static int palmas_usb_probe(struct platform_device *pdev)
229 if (status < 0) { 227 if (status < 0) {
230 dev_err(&pdev->dev, "can't get IRQ %d, err %d\n", 228 dev_err(&pdev->dev, "can't get IRQ %d, err %d\n",
231 palmas_usb->vbus_irq, status); 229 palmas_usb->vbus_irq, status);
232 kfree(palmas_usb->edev->name);
233 return status; 230 return status;
234 } 231 }
235 } 232 }
@@ -239,15 +236,6 @@ static int palmas_usb_probe(struct platform_device *pdev)
239 return 0; 236 return 0;
240} 237}
241 238
242static int palmas_usb_remove(struct platform_device *pdev)
243{
244 struct palmas_usb *palmas_usb = platform_get_drvdata(pdev);
245
246 kfree(palmas_usb->edev->name);
247
248 return 0;
249}
250
251#ifdef CONFIG_PM_SLEEP 239#ifdef CONFIG_PM_SLEEP
252static int palmas_usb_suspend(struct device *dev) 240static int palmas_usb_suspend(struct device *dev)
253{ 241{
@@ -288,7 +276,6 @@ static const struct of_device_id of_palmas_match_tbl[] = {
288 276
289static struct platform_driver palmas_usb_driver = { 277static struct platform_driver palmas_usb_driver = {
290 .probe = palmas_usb_probe, 278 .probe = palmas_usb_probe,
291 .remove = palmas_usb_remove,
292 .driver = { 279 .driver = {
293 .name = "palmas-usb", 280 .name = "palmas-usb",
294 .of_match_table = of_palmas_match_tbl, 281 .of_match_table = of_palmas_match_tbl,
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index 76157ab9faf3..43b57b02d050 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -124,25 +124,35 @@ static int find_cable_index_by_id(struct extcon_dev *edev, const unsigned int id
124 return -EINVAL; 124 return -EINVAL;
125} 125}
126 126
127static int find_cable_index_by_name(struct extcon_dev *edev, const char *name) 127static int find_cable_id_by_name(struct extcon_dev *edev, const char *name)
128{ 128{
129 unsigned int id = EXTCON_NONE; 129 unsigned int id = -EINVAL;
130 int i = 0; 130 int i = 0;
131 131
132 if (edev->max_supported == 0) 132 /* Find the id of extcon cable */
133 return -EINVAL;
134
135 /* Find the the number of extcon cable */
136 while (extcon_name[i]) { 133 while (extcon_name[i]) {
137 if (!strncmp(extcon_name[i], name, CABLE_NAME_MAX)) { 134 if (!strncmp(extcon_name[i], name, CABLE_NAME_MAX)) {
138 id = i; 135 id = i;
139 break; 136 break;
140 } 137 }
138 i++;
141 } 139 }
142 140
143 if (id == EXTCON_NONE) 141 return id;
142}
143
144static int find_cable_index_by_name(struct extcon_dev *edev, const char *name)
145{
146 unsigned int id;
147
148 if (edev->max_supported == 0)
144 return -EINVAL; 149 return -EINVAL;
145 150
151 /* Find the the number of extcon cable */
152 id = find_cable_id_by_name(edev, name);
153 if (id < 0)
154 return id;
155
146 return find_cable_index_by_id(edev, id); 156 return find_cable_index_by_id(edev, id);
147} 157}
148 158
@@ -228,9 +238,11 @@ static ssize_t cable_state_show(struct device *dev,
228 struct extcon_cable *cable = container_of(attr, struct extcon_cable, 238 struct extcon_cable *cable = container_of(attr, struct extcon_cable,
229 attr_state); 239 attr_state);
230 240
241 int i = cable->cable_index;
242
231 return sprintf(buf, "%d\n", 243 return sprintf(buf, "%d\n",
232 extcon_get_cable_state_(cable->edev, 244 extcon_get_cable_state_(cable->edev,
233 cable->cable_index)); 245 cable->edev->supported_cable[i]));
234} 246}
235 247
236/** 248/**
@@ -263,20 +275,25 @@ int extcon_update_state(struct extcon_dev *edev, u32 mask, u32 state)
263 spin_lock_irqsave(&edev->lock, flags); 275 spin_lock_irqsave(&edev->lock, flags);
264 276
265 if (edev->state != ((edev->state & ~mask) | (state & mask))) { 277 if (edev->state != ((edev->state & ~mask) | (state & mask))) {
278 u32 old_state;
279
266 if (check_mutually_exclusive(edev, (edev->state & ~mask) | 280 if (check_mutually_exclusive(edev, (edev->state & ~mask) |
267 (state & mask))) { 281 (state & mask))) {
268 spin_unlock_irqrestore(&edev->lock, flags); 282 spin_unlock_irqrestore(&edev->lock, flags);
269 return -EPERM; 283 return -EPERM;
270 } 284 }
271 285
272 for (index = 0; index < edev->max_supported; index++) { 286 old_state = edev->state;
273 if (is_extcon_changed(edev->state, state, index, &attached))
274 raw_notifier_call_chain(&edev->nh[index], attached, edev);
275 }
276
277 edev->state &= ~mask; 287 edev->state &= ~mask;
278 edev->state |= state & mask; 288 edev->state |= state & mask;
279 289
290 for (index = 0; index < edev->max_supported; index++) {
291 if (is_extcon_changed(old_state, edev->state, index,
292 &attached))
293 raw_notifier_call_chain(&edev->nh[index],
294 attached, edev);
295 }
296
280 /* This could be in interrupt handler */ 297 /* This could be in interrupt handler */
281 prop_buf = (char *)get_zeroed_page(GFP_ATOMIC); 298 prop_buf = (char *)get_zeroed_page(GFP_ATOMIC);
282 if (prop_buf) { 299 if (prop_buf) {
@@ -361,8 +378,13 @@ EXPORT_SYMBOL_GPL(extcon_get_cable_state_);
361 */ 378 */
362int extcon_get_cable_state(struct extcon_dev *edev, const char *cable_name) 379int extcon_get_cable_state(struct extcon_dev *edev, const char *cable_name)
363{ 380{
364 return extcon_get_cable_state_(edev, find_cable_index_by_name 381 unsigned int id;
365 (edev, cable_name)); 382
383 id = find_cable_id_by_name(edev, cable_name);
384 if (id < 0)
385 return id;
386
387 return extcon_get_cable_state_(edev, id);
366} 388}
367EXPORT_SYMBOL_GPL(extcon_get_cable_state); 389EXPORT_SYMBOL_GPL(extcon_get_cable_state);
368 390
@@ -404,8 +426,13 @@ EXPORT_SYMBOL_GPL(extcon_set_cable_state_);
404int extcon_set_cable_state(struct extcon_dev *edev, 426int extcon_set_cable_state(struct extcon_dev *edev,
405 const char *cable_name, bool cable_state) 427 const char *cable_name, bool cable_state)
406{ 428{
407 return extcon_set_cable_state_(edev, find_cable_index_by_name 429 unsigned int id;
408 (edev, cable_name), cable_state); 430
431 id = find_cable_id_by_name(edev, cable_name);
432 if (id < 0)
433 return id;
434
435 return extcon_set_cable_state_(edev, id, cable_state);
409} 436}
410EXPORT_SYMBOL_GPL(extcon_set_cable_state); 437EXPORT_SYMBOL_GPL(extcon_set_cable_state);
411 438
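
The extcon_update_state() hunk above snapshots the previous state, applies the masked update to edev->state, and only then walks the supported cables and fires the notifier chain for the bits that actually changed, so listeners observe the already-updated device state. Below is a minimal userspace sketch of that ordering; the names are illustrative and a printf stands in for raw_notifier_call_chain(), this is not the extcon API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_CABLES 8

struct device_state {
	uint32_t state;                 /* one bit per supported cable */
	unsigned int max_supported;
};

static bool bit_changed(uint32_t old, uint32_t cur, unsigned int idx, bool *attached)
{
	if (((old >> idx) & 1) == ((cur >> idx) & 1))
		return false;
	*attached = (cur >> idx) & 1;
	return true;
}

/* Apply "state" under "mask", then notify only for the bits that changed. */
static void update_state(struct device_state *dev, uint32_t mask, uint32_t state)
{
	uint32_t old_state = dev->state;    /* snapshot before the update */
	bool attached;

	dev->state &= ~mask;
	dev->state |= state & mask;

	/* Listeners run after dev->state already holds the new value;
	 * printf stands in for the notifier chain call. */
	for (unsigned int i = 0; i < dev->max_supported; i++) {
		if (bit_changed(old_state, dev->state, i, &attached))
			printf("cable %u %s\n", i, attached ? "attached" : "detached");
	}
}

int main(void)
{
	struct device_state dev = { .state = 0x1, .max_supported = MAX_CABLES };

	update_state(&dev, 0x3, 0x2);   /* detaches cable 0, attaches cable 1 */
	return 0;
}
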
diff --git a/drivers/firmware/broadcom/bcm47xx_nvram.c b/drivers/firmware/broadcom/bcm47xx_nvram.c
index 87add3fdce52..e41594510b97 100644
--- a/drivers/firmware/broadcom/bcm47xx_nvram.c
+++ b/drivers/firmware/broadcom/bcm47xx_nvram.c
@@ -245,4 +245,4 @@ char *bcm47xx_nvram_get_contents(size_t *nvram_size)
245} 245}
246EXPORT_SYMBOL(bcm47xx_nvram_get_contents); 246EXPORT_SYMBOL(bcm47xx_nvram_get_contents);
247 247
248MODULE_LICENSE("GPLv2"); 248MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
index 4fd9961d552e..d42537425438 100644
--- a/drivers/firmware/efi/cper.c
+++ b/drivers/firmware/efi/cper.c
@@ -305,10 +305,17 @@ const char *cper_mem_err_unpack(struct trace_seq *p,
305 return ret; 305 return ret;
306} 306}
307 307
308static void cper_print_mem(const char *pfx, const struct cper_sec_mem_err *mem) 308static void cper_print_mem(const char *pfx, const struct cper_sec_mem_err *mem,
309 int len)
309{ 310{
310 struct cper_mem_err_compact cmem; 311 struct cper_mem_err_compact cmem;
311 312
313 /* Don't trust UEFI 2.1/2.2 structure with bad validation bits */
314 if (len == sizeof(struct cper_sec_mem_err_old) &&
315 (mem->validation_bits & ~(CPER_MEM_VALID_RANK_NUMBER - 1))) {
316 pr_err(FW_WARN "valid bits set for fields beyond structure\n");
317 return;
318 }
312 if (mem->validation_bits & CPER_MEM_VALID_ERROR_STATUS) 319 if (mem->validation_bits & CPER_MEM_VALID_ERROR_STATUS)
313 printk("%s""error_status: 0x%016llx\n", pfx, mem->error_status); 320 printk("%s""error_status: 0x%016llx\n", pfx, mem->error_status);
314 if (mem->validation_bits & CPER_MEM_VALID_PA) 321 if (mem->validation_bits & CPER_MEM_VALID_PA)
@@ -405,8 +412,10 @@ static void cper_estatus_print_section(
405 } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PLATFORM_MEM)) { 412 } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PLATFORM_MEM)) {
406 struct cper_sec_mem_err *mem_err = (void *)(gdata + 1); 413 struct cper_sec_mem_err *mem_err = (void *)(gdata + 1);
407 printk("%s""section_type: memory error\n", newpfx); 414 printk("%s""section_type: memory error\n", newpfx);
408 if (gdata->error_data_length >= sizeof(*mem_err)) 415 if (gdata->error_data_length >=
409 cper_print_mem(newpfx, mem_err); 416 sizeof(struct cper_sec_mem_err_old))
417 cper_print_mem(newpfx, mem_err,
418 gdata->error_data_length);
410 else 419 else
411 goto err_section_too_small; 420 goto err_section_too_small;
412 } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PCIE)) { 421 } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PCIE)) {
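
cper_print_mem() now also accepts the shorter UEFI 2.1/2.2 memory-error record, but refuses to decode it when validation bits are set for fields that only exist in the longer layout; `~(CPER_MEM_VALID_RANK_NUMBER - 1)` is a mask of that bit and every bit above it. A small sketch of the same bit arithmetic, using made-up field bits and record sizes rather than the real CPER definitions:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical validation bits: the first three fields exist in the old,
 * shorter record; RANK_NUMBER and above only in the newer, longer one. */
#define MEM_VALID_ERROR_STATUS  (1ULL << 0)
#define MEM_VALID_PHYS_ADDR     (1ULL << 1)
#define MEM_VALID_NODE          (1ULL << 2)
#define MEM_VALID_RANK_NUMBER   (1ULL << 3)
#define MEM_VALID_CARD_HANDLE   (1ULL << 4)

#define OLD_RECORD_LEN 24   /* stand-in for sizeof the old structure */
#define NEW_RECORD_LEN 40   /* stand-in for sizeof the current structure */

static bool record_is_sane(uint64_t validation_bits, size_t len)
{
	/* ~(RANK_NUMBER - 1) selects RANK_NUMBER and every higher bit: an
	 * old-sized record must not claim any of those fields are valid. */
	if (len == OLD_RECORD_LEN &&
	    (validation_bits & ~(MEM_VALID_RANK_NUMBER - 1)))
		return false;
	return true;
}

int main(void)
{
	printf("%d\n", record_is_sane(MEM_VALID_PHYS_ADDR, OLD_RECORD_LEN));   /* 1 */
	printf("%d\n", record_is_sane(MEM_VALID_CARD_HANDLE, OLD_RECORD_LEN)); /* 0 */
	printf("%d\n", record_is_sane(MEM_VALID_CARD_HANDLE, NEW_RECORD_LEN)); /* 1 */
	return 0;
}
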
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 9fa8084a7c8d..d6144e3b97c5 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -58,6 +58,11 @@ bool efi_runtime_disabled(void)
58 58
59static int __init parse_efi_cmdline(char *str) 59static int __init parse_efi_cmdline(char *str)
60{ 60{
61 if (!str) {
62 pr_warn("need at least one option\n");
63 return -EINVAL;
64 }
65
61 if (parse_option_str(str, "noruntime")) 66 if (parse_option_str(str, "noruntime"))
62 disable_runtime = true; 67 disable_runtime = true;
63 68
diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c
index 7a3cb1fa0a76..4630a8133ea6 100644
--- a/drivers/gpio/gpio-brcmstb.c
+++ b/drivers/gpio/gpio-brcmstb.c
@@ -87,6 +87,15 @@ static int brcmstb_gpio_remove(struct platform_device *pdev)
87 struct brcmstb_gpio_bank *bank; 87 struct brcmstb_gpio_bank *bank;
88 int ret = 0; 88 int ret = 0;
89 89
90 if (!priv) {
91 dev_err(&pdev->dev, "called %s without drvdata!\n", __func__);
92 return -EFAULT;
93 }
94
95 /*
96 * You can lose return values below, but we report all errors, and it's
97 * more important to actually perform all of the steps.
98 */
90 list_for_each(pos, &priv->bank_list) { 99 list_for_each(pos, &priv->bank_list) {
91 bank = list_entry(pos, struct brcmstb_gpio_bank, node); 100 bank = list_entry(pos, struct brcmstb_gpio_bank, node);
92 ret = bgpio_remove(&bank->bgc); 101 ret = bgpio_remove(&bank->bgc);
@@ -143,6 +152,8 @@ static int brcmstb_gpio_probe(struct platform_device *pdev)
143 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); 152 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
144 if (!priv) 153 if (!priv)
145 return -ENOMEM; 154 return -ENOMEM;
155 platform_set_drvdata(pdev, priv);
156 INIT_LIST_HEAD(&priv->bank_list);
146 157
147 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 158 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
148 reg_base = devm_ioremap_resource(dev, res); 159 reg_base = devm_ioremap_resource(dev, res);
@@ -153,7 +164,6 @@ static int brcmstb_gpio_probe(struct platform_device *pdev)
153 priv->reg_base = reg_base; 164 priv->reg_base = reg_base;
154 priv->pdev = pdev; 165 priv->pdev = pdev;
155 166
156 INIT_LIST_HEAD(&priv->bank_list);
157 if (brcmstb_gpio_sanity_check_banks(dev, np, res)) 167 if (brcmstb_gpio_sanity_check_banks(dev, np, res))
158 return -EINVAL; 168 return -EINVAL;
159 169
@@ -221,8 +231,6 @@ static int brcmstb_gpio_probe(struct platform_device *pdev)
221 dev_info(dev, "Registered %d banks (GPIO(s): %d-%d)\n", 231 dev_info(dev, "Registered %d banks (GPIO(s): %d-%d)\n",
222 priv->num_banks, priv->gpio_base, gpio_base - 1); 232 priv->num_banks, priv->gpio_base, gpio_base - 1);
223 233
224 platform_set_drvdata(pdev, priv);
225
226 return 0; 234 return 0;
227 235
228fail: 236fail:
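
The gpio-brcmstb hunks publish the driver data and initialize the bank list immediately after allocation, and remove() now bails out when no driver data was ever set, so teardown always walks a well-formed (possibly empty) list even after a mid-probe failure. A toy probe/remove pair showing that ordering; the structures and helpers are invented for the sketch, not the gpio-brcmstb API.

#include <stdio.h>
#include <stdlib.h>

struct bank {
	int id;
	struct bank *next;
};

struct priv {
	struct bank *banks;        /* an empty list is valid from the start */
};

static struct priv *drvdata;   /* stand-in for platform drvdata storage */

static int probe(int nbanks, int fail_after)
{
	struct priv *p = calloc(1, sizeof(*p));

	if (!p)
		return -1;
	drvdata = p;               /* publish early: remove can run from here on */

	for (int i = 0; i < nbanks; i++) {
		struct bank *b;

		if (i == fail_after)
			return -1;         /* mid-probe failure: list stays consistent */
		b = calloc(1, sizeof(*b));
		if (!b)
			return -1;
		b->id = i;
		b->next = p->banks;
		p->banks = b;
	}
	return 0;
}

static int remove_dev(void)
{
	if (!drvdata) {
		fprintf(stderr, "remove called without drvdata\n");
		return -1;
	}
	while (drvdata->banks) {
		struct bank *b = drvdata->banks;

		drvdata->banks = b->next;
		printf("tearing down bank %d\n", b->id);
		free(b);
	}
	free(drvdata);
	drvdata = NULL;
	return 0;
}

int main(void)
{
	probe(4, 2);               /* fails after registering two banks */
	return remove_dev();       /* still cleans up what was set up */
}
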
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index c5e05c82d67c..c246ac3dda7c 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -578,15 +578,13 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
578 writel_relaxed(~0, &g->clr_falling); 578 writel_relaxed(~0, &g->clr_falling);
579 writel_relaxed(~0, &g->clr_rising); 579 writel_relaxed(~0, &g->clr_rising);
580 580
581 /* set up all irqs in this bank */
582 irq_set_chained_handler(bank_irq, gpio_irq_handler);
583
584 /* 581 /*
585 * Each chip handles 32 gpios, and each irq bank consists of 16 582 * Each chip handles 32 gpios, and each irq bank consists of 16
586 * gpio irqs. Pass the irq bank's corresponding controller to 583 * gpio irqs. Pass the irq bank's corresponding controller to
587 * the chained irq handler. 584 * the chained irq handler.
588 */ 585 */
589 irq_set_handler_data(bank_irq, &chips[gpio / 32]); 586 irq_set_chained_handler_and_data(bank_irq, gpio_irq_handler,
587 &chips[gpio / 32]);
590 588
591 binten |= BIT(bank); 589 binten |= BIT(bank);
592 } 590 }
diff --git a/drivers/gpio/gpio-max732x.c b/drivers/gpio/gpio-max732x.c
index aed4ca9338bc..7d3c90e9da71 100644
--- a/drivers/gpio/gpio-max732x.c
+++ b/drivers/gpio/gpio-max732x.c
@@ -603,6 +603,7 @@ static int max732x_setup_gpio(struct max732x_chip *chip,
603 gc->base = gpio_start; 603 gc->base = gpio_start;
604 gc->ngpio = port; 604 gc->ngpio = port;
605 gc->label = chip->client->name; 605 gc->label = chip->client->name;
606 gc->dev = &chip->client->dev;
606 gc->owner = THIS_MODULE; 607 gc->owner = THIS_MODULE;
607 608
608 return port; 609 return port;
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index b0c57d505be7..61a731ff9a07 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -500,8 +500,10 @@ static int omap_gpio_irq_type(struct irq_data *d, unsigned type)
500 500
501 spin_lock_irqsave(&bank->lock, flags); 501 spin_lock_irqsave(&bank->lock, flags);
502 retval = omap_set_gpio_triggering(bank, offset, type); 502 retval = omap_set_gpio_triggering(bank, offset, type);
503 if (retval) 503 if (retval) {
504 spin_unlock_irqrestore(&bank->lock, flags);
504 goto error; 505 goto error;
506 }
505 omap_gpio_init_irq(bank, offset); 507 omap_gpio_init_irq(bank, offset);
506 if (!omap_gpio_is_input(bank, offset)) { 508 if (!omap_gpio_is_input(bank, offset)) {
507 spin_unlock_irqrestore(&bank->lock, flags); 509 spin_unlock_irqrestore(&bank->lock, flags);
@@ -1185,6 +1187,7 @@ static int omap_gpio_probe(struct platform_device *pdev)
1185 bank->irq = res->start; 1187 bank->irq = res->start;
1186 bank->dev = dev; 1188 bank->dev = dev;
1187 bank->chip.dev = dev; 1189 bank->chip.dev = dev;
1190 bank->chip.owner = THIS_MODULE;
1188 bank->dbck_flag = pdata->dbck_flag; 1191 bank->dbck_flag = pdata->dbck_flag;
1189 bank->stride = pdata->bank_stride; 1192 bank->stride = pdata->bank_stride;
1190 bank->width = pdata->bank_width; 1193 bank->width = pdata->bank_width;
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index d233eb3b8132..50caeb1ee350 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -570,6 +570,10 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
570 "could not connect irqchip to gpiochip\n"); 570 "could not connect irqchip to gpiochip\n");
571 return ret; 571 return ret;
572 } 572 }
573
574 gpiochip_set_chained_irqchip(&chip->gpio_chip,
575 &pca953x_irq_chip,
576 client->irq, NULL);
573 } 577 }
574 578
575 return 0; 579 return 0;
diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c
index 77fe5d3cb105..d5284dfe01fe 100644
--- a/drivers/gpio/gpio-xilinx.c
+++ b/drivers/gpio/gpio-xilinx.c
@@ -220,9 +220,9 @@ static void xgpio_save_regs(struct of_mm_gpio_chip *mm_gc)
220 if (!chip->gpio_width[1]) 220 if (!chip->gpio_width[1])
221 return; 221 return;
222 222
223 xgpio_writereg(mm_gc->regs + XGPIO_DATA_OFFSET + XGPIO_TRI_OFFSET, 223 xgpio_writereg(mm_gc->regs + XGPIO_DATA_OFFSET + XGPIO_CHANNEL_OFFSET,
224 chip->gpio_state[1]); 224 chip->gpio_state[1]);
225 xgpio_writereg(mm_gc->regs + XGPIO_TRI_OFFSET + XGPIO_TRI_OFFSET, 225 xgpio_writereg(mm_gc->regs + XGPIO_TRI_OFFSET + XGPIO_CHANNEL_OFFSET,
226 chip->gpio_dir[1]); 226 chip->gpio_dir[1]);
227} 227}
228 228
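
The xgpio_save_regs() fix above restores the second channel's data and tristate registers at XGPIO_DATA_OFFSET + XGPIO_CHANNEL_OFFSET and XGPIO_TRI_OFFSET + XGPIO_CHANNEL_OFFSET instead of adding the tristate offset twice. A few lines of plain C showing that per-channel register addressing, with invented offsets for illustration (the real values live in gpio-xilinx.c):

#include <stdint.h>
#include <stdio.h>

/* Illustrative register map: channel 1 mirrors channel 0 at a fixed offset. */
#define GPIO_DATA_OFFSET    0x0
#define GPIO_TRI_OFFSET     0x4
#define GPIO_CHANNEL_OFFSET 0x8

static uint32_t regs[16];       /* fake MMIO window, indexed by offset / 4 */

static void writereg(unsigned int offset, uint32_t val)
{
	regs[offset / 4] = val;
	printf("write 0x%02x = 0x%08x\n", offset, val);
}

static void restore_channel(unsigned int chan, uint32_t data, uint32_t tri)
{
	unsigned int base = chan * GPIO_CHANNEL_OFFSET;

	/* Channel N's register sits at its channel-0 offset plus
	 * N * CHANNEL_OFFSET -- not at the tristate offset added twice. */
	writereg(base + GPIO_DATA_OFFSET, data);
	writereg(base + GPIO_TRI_OFFSET, tri);
}

int main(void)
{
	restore_channel(0, 0xaaaa5555, 0x0000ffff);
	restore_channel(1, 0x12345678, 0xffff0000);
	return 0;
}
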
diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c
index 2e87c4b8da26..a78882389836 100644
--- a/drivers/gpio/gpio-zynq.c
+++ b/drivers/gpio/gpio-zynq.c
@@ -757,6 +757,7 @@ static int zynq_gpio_remove(struct platform_device *pdev)
757 gpiochip_remove(&gpio->chip); 757 gpiochip_remove(&gpio->chip);
758 clk_disable_unprepare(gpio->clk); 758 clk_disable_unprepare(gpio->clk);
759 device_set_wakeup_capable(&pdev->dev, 0); 759 device_set_wakeup_capable(&pdev->dev, 0);
760 pm_runtime_disable(&pdev->dev);
760 return 0; 761 return 0;
761} 762}
762 763
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 01657830b470..f7b49d5ce4b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1130,6 +1130,9 @@ struct amdgpu_gfx {
1130 uint32_t me_feature_version; 1130 uint32_t me_feature_version;
1131 uint32_t ce_feature_version; 1131 uint32_t ce_feature_version;
1132 uint32_t pfp_feature_version; 1132 uint32_t pfp_feature_version;
1133 uint32_t rlc_feature_version;
1134 uint32_t mec_feature_version;
1135 uint32_t mec2_feature_version;
1133 struct amdgpu_ring gfx_ring[AMDGPU_MAX_GFX_RINGS]; 1136 struct amdgpu_ring gfx_ring[AMDGPU_MAX_GFX_RINGS];
1134 unsigned num_gfx_rings; 1137 unsigned num_gfx_rings;
1135 struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS]; 1138 struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS];
@@ -1614,6 +1617,9 @@ struct amdgpu_uvd {
1614#define AMDGPU_MAX_VCE_HANDLES 16 1617#define AMDGPU_MAX_VCE_HANDLES 16
1615#define AMDGPU_VCE_FIRMWARE_OFFSET 256 1618#define AMDGPU_VCE_FIRMWARE_OFFSET 256
1616 1619
1620#define AMDGPU_VCE_HARVEST_VCE0 (1 << 0)
1621#define AMDGPU_VCE_HARVEST_VCE1 (1 << 1)
1622
1617struct amdgpu_vce { 1623struct amdgpu_vce {
1618 struct amdgpu_bo *vcpu_bo; 1624 struct amdgpu_bo *vcpu_bo;
1619 uint64_t gpu_addr; 1625 uint64_t gpu_addr;
@@ -1626,6 +1632,7 @@ struct amdgpu_vce {
1626 const struct firmware *fw; /* VCE firmware */ 1632 const struct firmware *fw; /* VCE firmware */
1627 struct amdgpu_ring ring[AMDGPU_MAX_VCE_RINGS]; 1633 struct amdgpu_ring ring[AMDGPU_MAX_VCE_RINGS];
1628 struct amdgpu_irq_src irq; 1634 struct amdgpu_irq_src irq;
1635 unsigned harvest_config;
1629}; 1636};
1630 1637
1631/* 1638/*
@@ -1635,6 +1642,7 @@ struct amdgpu_sdma {
1635 /* SDMA firmware */ 1642 /* SDMA firmware */
1636 const struct firmware *fw; 1643 const struct firmware *fw;
1637 uint32_t fw_version; 1644 uint32_t fw_version;
1645 uint32_t feature_version;
1638 1646
1639 struct amdgpu_ring ring; 1647 struct amdgpu_ring ring;
1640}; 1648};
@@ -1862,6 +1870,12 @@ typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
1862typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t); 1870typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
1863typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t); 1871typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);
1864 1872
1873struct amdgpu_ip_block_status {
1874 bool valid;
1875 bool sw;
1876 bool hw;
1877};
1878
1865struct amdgpu_device { 1879struct amdgpu_device {
1866 struct device *dev; 1880 struct device *dev;
1867 struct drm_device *ddev; 1881 struct drm_device *ddev;
@@ -2004,7 +2018,7 @@ struct amdgpu_device {
2004 2018
2005 const struct amdgpu_ip_block_version *ip_blocks; 2019 const struct amdgpu_ip_block_version *ip_blocks;
2006 int num_ip_blocks; 2020 int num_ip_blocks;
2007 bool *ip_block_enabled; 2021 struct amdgpu_ip_block_status *ip_block_status;
2008 struct mutex mn_lock; 2022 struct mutex mn_lock;
2009 DECLARE_HASHTABLE(mn_hash, 7); 2023 DECLARE_HASHTABLE(mn_hash, 7);
2010 2024
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index d63135bf29c0..1f040d85ac47 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -669,6 +669,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
669static int amdgpu_cs_dependencies(struct amdgpu_device *adev, 669static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
670 struct amdgpu_cs_parser *p) 670 struct amdgpu_cs_parser *p)
671{ 671{
672 struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
672 struct amdgpu_ib *ib; 673 struct amdgpu_ib *ib;
673 int i, j, r; 674 int i, j, r;
674 675
@@ -694,6 +695,7 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
694 for (j = 0; j < num_deps; ++j) { 695 for (j = 0; j < num_deps; ++j) {
695 struct amdgpu_fence *fence; 696 struct amdgpu_fence *fence;
696 struct amdgpu_ring *ring; 697 struct amdgpu_ring *ring;
698 struct amdgpu_ctx *ctx;
697 699
698 r = amdgpu_cs_get_ring(adev, deps[j].ip_type, 700 r = amdgpu_cs_get_ring(adev, deps[j].ip_type,
699 deps[j].ip_instance, 701 deps[j].ip_instance,
@@ -701,14 +703,21 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
701 if (r) 703 if (r)
702 return r; 704 return r;
703 705
706 ctx = amdgpu_ctx_get(fpriv, deps[j].ctx_id);
707 if (ctx == NULL)
708 return -EINVAL;
709
704 r = amdgpu_fence_recreate(ring, p->filp, 710 r = amdgpu_fence_recreate(ring, p->filp,
705 deps[j].handle, 711 deps[j].handle,
706 &fence); 712 &fence);
707 if (r) 713 if (r) {
714 amdgpu_ctx_put(ctx);
708 return r; 715 return r;
716 }
709 717
710 amdgpu_sync_fence(&ib->sync, fence); 718 amdgpu_sync_fence(&ib->sync, fence);
711 amdgpu_fence_unref(&fence); 719 amdgpu_fence_unref(&fence);
720 amdgpu_ctx_put(ctx);
712 } 721 }
713 } 722 }
714 723
@@ -808,12 +817,16 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
808 817
809 r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance, 818 r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance,
810 wait->in.ring, &ring); 819 wait->in.ring, &ring);
811 if (r) 820 if (r) {
821 amdgpu_ctx_put(ctx);
812 return r; 822 return r;
823 }
813 824
814 r = amdgpu_fence_recreate(ring, filp, wait->in.handle, &fence); 825 r = amdgpu_fence_recreate(ring, filp, wait->in.handle, &fence);
815 if (r) 826 if (r) {
827 amdgpu_ctx_put(ctx);
816 return r; 828 return r;
829 }
817 830
818 r = fence_wait_timeout(&fence->base, true, timeout); 831 r = fence_wait_timeout(&fence->base, true, timeout);
819 amdgpu_fence_unref(&fence); 832 amdgpu_fence_unref(&fence);
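
Both amdgpu_cs hunks above pair amdgpu_ctx_get() with an amdgpu_ctx_put() on every path out of the function, including the newly bracketed error returns. The sketch below shows the same get/put discipline around a plain reference-counted object; the kernel's kref helpers are replaced by a bare counter purely for illustration.

#include <stdio.h>
#include <stdlib.h>

struct ctx {
	int refcount;
	int id;
};

static struct ctx *ctx_get(struct ctx *c)
{
	if (c)
		c->refcount++;
	return c;
}

static void ctx_put(struct ctx *c)
{
	if (c && --c->refcount == 0) {
		printf("freeing ctx %d\n", c->id);
		free(c);
	}
}

/* Every exit taken after ctx_get() succeeded is matched by a ctx_put(). */
static int use_ctx(struct ctx *c, int fail)
{
	if (!ctx_get(c))
		return -1;

	if (fail) {
		ctx_put(c);     /* error path still drops the reference */
		return -1;
	}

	printf("working with ctx %d\n", c->id);
	ctx_put(c);
	return 0;
}

int main(void)
{
	struct ctx *c = malloc(sizeof(*c));

	if (!c)
		return 1;
	c->refcount = 1;    /* the caller's own reference */
	c->id = 42;

	use_ctx(c, 1);      /* error path */
	use_ctx(c, 0);      /* success path */
	ctx_put(c);         /* drop the caller's reference: frees ctx 42 */
	return 0;
}
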
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index ba46be361c9b..99f158e1baff 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1191,8 +1191,9 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
1191 return -EINVAL; 1191 return -EINVAL;
1192 } 1192 }
1193 1193
1194 adev->ip_block_enabled = kcalloc(adev->num_ip_blocks, sizeof(bool), GFP_KERNEL); 1194 adev->ip_block_status = kcalloc(adev->num_ip_blocks,
1195 if (adev->ip_block_enabled == NULL) 1195 sizeof(struct amdgpu_ip_block_status), GFP_KERNEL);
1196 if (adev->ip_block_status == NULL)
1196 return -ENOMEM; 1197 return -ENOMEM;
1197 1198
1198 if (adev->ip_blocks == NULL) { 1199 if (adev->ip_blocks == NULL) {
@@ -1203,14 +1204,19 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
1203 for (i = 0; i < adev->num_ip_blocks; i++) { 1204 for (i = 0; i < adev->num_ip_blocks; i++) {
1204 if ((amdgpu_ip_block_mask & (1 << i)) == 0) { 1205 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
1205 DRM_ERROR("disabled ip block: %d\n", i); 1206 DRM_ERROR("disabled ip block: %d\n", i);
1206 adev->ip_block_enabled[i] = false; 1207 adev->ip_block_status[i].valid = false;
1207 } else { 1208 } else {
1208 if (adev->ip_blocks[i].funcs->early_init) { 1209 if (adev->ip_blocks[i].funcs->early_init) {
1209 r = adev->ip_blocks[i].funcs->early_init((void *)adev); 1210 r = adev->ip_blocks[i].funcs->early_init((void *)adev);
1210 if (r) 1211 if (r == -ENOENT)
1212 adev->ip_block_status[i].valid = false;
1213 else if (r)
1211 return r; 1214 return r;
1215 else
1216 adev->ip_block_status[i].valid = true;
1217 } else {
1218 adev->ip_block_status[i].valid = true;
1212 } 1219 }
1213 adev->ip_block_enabled[i] = true;
1214 } 1220 }
1215 } 1221 }
1216 1222
@@ -1222,11 +1228,12 @@ static int amdgpu_init(struct amdgpu_device *adev)
1222 int i, r; 1228 int i, r;
1223 1229
1224 for (i = 0; i < adev->num_ip_blocks; i++) { 1230 for (i = 0; i < adev->num_ip_blocks; i++) {
1225 if (!adev->ip_block_enabled[i]) 1231 if (!adev->ip_block_status[i].valid)
1226 continue; 1232 continue;
1227 r = adev->ip_blocks[i].funcs->sw_init((void *)adev); 1233 r = adev->ip_blocks[i].funcs->sw_init((void *)adev);
1228 if (r) 1234 if (r)
1229 return r; 1235 return r;
1236 adev->ip_block_status[i].sw = true;
1230 /* need to do gmc hw init early so we can allocate gpu mem */ 1237 /* need to do gmc hw init early so we can allocate gpu mem */
1231 if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) { 1238 if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
1232 r = amdgpu_vram_scratch_init(adev); 1239 r = amdgpu_vram_scratch_init(adev);
@@ -1238,11 +1245,12 @@ static int amdgpu_init(struct amdgpu_device *adev)
1238 r = amdgpu_wb_init(adev); 1245 r = amdgpu_wb_init(adev);
1239 if (r) 1246 if (r)
1240 return r; 1247 return r;
1248 adev->ip_block_status[i].hw = true;
1241 } 1249 }
1242 } 1250 }
1243 1251
1244 for (i = 0; i < adev->num_ip_blocks; i++) { 1252 for (i = 0; i < adev->num_ip_blocks; i++) {
1245 if (!adev->ip_block_enabled[i]) 1253 if (!adev->ip_block_status[i].sw)
1246 continue; 1254 continue;
1247 /* gmc hw init is done early */ 1255 /* gmc hw init is done early */
1248 if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) 1256 if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC)
@@ -1250,6 +1258,7 @@ static int amdgpu_init(struct amdgpu_device *adev)
1250 r = adev->ip_blocks[i].funcs->hw_init((void *)adev); 1258 r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
1251 if (r) 1259 if (r)
1252 return r; 1260 return r;
1261 adev->ip_block_status[i].hw = true;
1253 } 1262 }
1254 1263
1255 return 0; 1264 return 0;
@@ -1260,7 +1269,7 @@ static int amdgpu_late_init(struct amdgpu_device *adev)
1260 int i = 0, r; 1269 int i = 0, r;
1261 1270
1262 for (i = 0; i < adev->num_ip_blocks; i++) { 1271 for (i = 0; i < adev->num_ip_blocks; i++) {
1263 if (!adev->ip_block_enabled[i]) 1272 if (!adev->ip_block_status[i].valid)
1264 continue; 1273 continue;
1265 /* enable clockgating to save power */ 1274 /* enable clockgating to save power */
1266 r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev, 1275 r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
@@ -1282,7 +1291,7 @@ static int amdgpu_fini(struct amdgpu_device *adev)
1282 int i, r; 1291 int i, r;
1283 1292
1284 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { 1293 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
1285 if (!adev->ip_block_enabled[i]) 1294 if (!adev->ip_block_status[i].hw)
1286 continue; 1295 continue;
1287 if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) { 1296 if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
1288 amdgpu_wb_fini(adev); 1297 amdgpu_wb_fini(adev);
@@ -1295,14 +1304,16 @@ static int amdgpu_fini(struct amdgpu_device *adev)
1295 return r; 1304 return r;
1296 r = adev->ip_blocks[i].funcs->hw_fini((void *)adev); 1305 r = adev->ip_blocks[i].funcs->hw_fini((void *)adev);
1297 /* XXX handle errors */ 1306 /* XXX handle errors */
1307 adev->ip_block_status[i].hw = false;
1298 } 1308 }
1299 1309
1300 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { 1310 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
1301 if (!adev->ip_block_enabled[i]) 1311 if (!adev->ip_block_status[i].sw)
1302 continue; 1312 continue;
1303 r = adev->ip_blocks[i].funcs->sw_fini((void *)adev); 1313 r = adev->ip_blocks[i].funcs->sw_fini((void *)adev);
1304 /* XXX handle errors */ 1314 /* XXX handle errors */
1305 adev->ip_block_enabled[i] = false; 1315 adev->ip_block_status[i].sw = false;
1316 adev->ip_block_status[i].valid = false;
1306 } 1317 }
1307 1318
1308 return 0; 1319 return 0;
@@ -1313,7 +1324,7 @@ static int amdgpu_suspend(struct amdgpu_device *adev)
1313 int i, r; 1324 int i, r;
1314 1325
1315 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { 1326 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
1316 if (!adev->ip_block_enabled[i]) 1327 if (!adev->ip_block_status[i].valid)
1317 continue; 1328 continue;
1318 /* ungate blocks so that suspend can properly shut them down */ 1329 /* ungate blocks so that suspend can properly shut them down */
1319 r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev, 1330 r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
@@ -1331,7 +1342,7 @@ static int amdgpu_resume(struct amdgpu_device *adev)
1331 int i, r; 1342 int i, r;
1332 1343
1333 for (i = 0; i < adev->num_ip_blocks; i++) { 1344 for (i = 0; i < adev->num_ip_blocks; i++) {
1334 if (!adev->ip_block_enabled[i]) 1345 if (!adev->ip_block_status[i].valid)
1335 continue; 1346 continue;
1336 r = adev->ip_blocks[i].funcs->resume(adev); 1347 r = adev->ip_blocks[i].funcs->resume(adev);
1337 if (r) 1348 if (r)
@@ -1577,8 +1588,8 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
1577 amdgpu_fence_driver_fini(adev); 1588 amdgpu_fence_driver_fini(adev);
1578 amdgpu_fbdev_fini(adev); 1589 amdgpu_fbdev_fini(adev);
1579 r = amdgpu_fini(adev); 1590 r = amdgpu_fini(adev);
1580 kfree(adev->ip_block_enabled); 1591 kfree(adev->ip_block_status);
1581 adev->ip_block_enabled = NULL; 1592 adev->ip_block_status = NULL;
1582 adev->accel_working = false; 1593 adev->accel_working = false;
1583 /* free i2c buses */ 1594 /* free i2c buses */
1584 amdgpu_i2c_fini(adev); 1595 amdgpu_i2c_fini(adev);
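
Replacing the ip_block_enabled bool array with struct amdgpu_ip_block_status lets the driver record, per IP block, whether the block is valid and whether its software and hardware stages actually completed, so the teardown loops only undo stages that were really brought up. A compact userspace sketch of that staged init/teardown bookkeeping; the block count and stage functions are invented.

#include <stdbool.h>
#include <stdio.h>

#define NBLOCKS 3

struct block_status {
	bool valid;  /* block is enabled at all */
	bool sw;     /* software init completed */
	bool hw;     /* hardware init completed */
};

static struct block_status status[NBLOCKS];

static int sw_init(int i) { (void)i; return 0; }
static int hw_init(int i) { return i == 1 ? -1 : 0; }  /* block 1 fails hw init */

static int init_all(void)
{
	for (int i = 0; i < NBLOCKS; i++) {
		status[i].valid = true;
		if (sw_init(i))
			return -1;
		status[i].sw = true;
	}
	for (int i = 0; i < NBLOCKS; i++) {
		if (!status[i].sw)
			continue;
		if (hw_init(i))
			return -1;
		status[i].hw = true;
	}
	return 0;
}

static void fini_all(void)
{
	for (int i = NBLOCKS - 1; i >= 0; i--) {
		if (!status[i].hw)
			continue;           /* hardware never came up: nothing to undo */
		printf("hw fini block %d\n", i);
		status[i].hw = false;
	}
	for (int i = NBLOCKS - 1; i >= 0; i--) {
		if (!status[i].sw)
			continue;
		printf("sw fini block %d\n", i);
		status[i].sw = false;
		status[i].valid = false;
	}
}

int main(void)
{
	if (init_all())
		printf("init failed part way through\n");
	fini_all();                 /* only undoes what actually succeeded */
	return 0;
}
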
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index ae43b58c9733..4afc507820c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -449,7 +449,7 @@ out:
449 * vital here, so they are not reported back to userspace. 449 * vital here, so they are not reported back to userspace.
450 */ 450 */
451static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, 451static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
452 struct amdgpu_bo_va *bo_va) 452 struct amdgpu_bo_va *bo_va, uint32_t operation)
453{ 453{
454 struct ttm_validate_buffer tv, *entry; 454 struct ttm_validate_buffer tv, *entry;
455 struct amdgpu_bo_list_entry *vm_bos; 455 struct amdgpu_bo_list_entry *vm_bos;
@@ -485,7 +485,9 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
485 if (r) 485 if (r)
486 goto error_unlock; 486 goto error_unlock;
487 487
488 r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem); 488
489 if (operation == AMDGPU_VA_OP_MAP)
490 r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem);
489 491
490error_unlock: 492error_unlock:
491 mutex_unlock(&bo_va->vm->mutex); 493 mutex_unlock(&bo_va->vm->mutex);
@@ -580,7 +582,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
580 } 582 }
581 583
582 if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE)) 584 if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE))
583 amdgpu_gem_va_update_vm(adev, bo_va); 585 amdgpu_gem_va_update_vm(adev, bo_va, args->operation);
584 586
585 drm_gem_object_unreference_unlocked(gobj); 587 drm_gem_object_unreference_unlocked(gobj);
586 return r; 588 return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 52dff75aac6f..bc0fac618a3f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -180,16 +180,16 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
180 if (vm) { 180 if (vm) {
181 /* do context switch */ 181 /* do context switch */
182 amdgpu_vm_flush(ring, vm, ib->sync.last_vm_update); 182 amdgpu_vm_flush(ring, vm, ib->sync.last_vm_update);
183 }
184 183
185 if (vm && ring->funcs->emit_gds_switch) 184 if (ring->funcs->emit_gds_switch)
186 amdgpu_ring_emit_gds_switch(ring, ib->vm->ids[ring->idx].id, 185 amdgpu_ring_emit_gds_switch(ring, ib->vm->ids[ring->idx].id,
187 ib->gds_base, ib->gds_size, 186 ib->gds_base, ib->gds_size,
188 ib->gws_base, ib->gws_size, 187 ib->gws_base, ib->gws_size,
189 ib->oa_base, ib->oa_size); 188 ib->oa_base, ib->oa_size);
190 189
191 if (ring->funcs->emit_hdp_flush) 190 if (ring->funcs->emit_hdp_flush)
192 amdgpu_ring_emit_hdp_flush(ring); 191 amdgpu_ring_emit_hdp_flush(ring);
192 }
193 193
194 old_ctx = ring->current_ctx; 194 old_ctx = ring->current_ctx;
195 for (i = 0; i < num_ibs; ++i) { 195 for (i = 0; i < num_ibs; ++i) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 5533434c7a8f..3bfe67de8349 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -235,7 +235,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
235 235
236 for (i = 0; i < adev->num_ip_blocks; i++) { 236 for (i = 0; i < adev->num_ip_blocks; i++) {
237 if (adev->ip_blocks[i].type == type && 237 if (adev->ip_blocks[i].type == type &&
238 adev->ip_block_enabled[i]) { 238 adev->ip_block_status[i].valid) {
239 ip.hw_ip_version_major = adev->ip_blocks[i].major; 239 ip.hw_ip_version_major = adev->ip_blocks[i].major;
240 ip.hw_ip_version_minor = adev->ip_blocks[i].minor; 240 ip.hw_ip_version_minor = adev->ip_blocks[i].minor;
241 ip.capabilities_flags = 0; 241 ip.capabilities_flags = 0;
@@ -274,7 +274,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
274 274
275 for (i = 0; i < adev->num_ip_blocks; i++) 275 for (i = 0; i < adev->num_ip_blocks; i++)
276 if (adev->ip_blocks[i].type == type && 276 if (adev->ip_blocks[i].type == type &&
277 adev->ip_block_enabled[i] && 277 adev->ip_block_status[i].valid &&
278 count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT) 278 count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
279 count++; 279 count++;
280 280
@@ -317,16 +317,17 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
317 break; 317 break;
318 case AMDGPU_INFO_FW_GFX_RLC: 318 case AMDGPU_INFO_FW_GFX_RLC:
319 fw_info.ver = adev->gfx.rlc_fw_version; 319 fw_info.ver = adev->gfx.rlc_fw_version;
320 fw_info.feature = 0; 320 fw_info.feature = adev->gfx.rlc_feature_version;
321 break; 321 break;
322 case AMDGPU_INFO_FW_GFX_MEC: 322 case AMDGPU_INFO_FW_GFX_MEC:
323 if (info->query_fw.index == 0) 323 if (info->query_fw.index == 0) {
324 fw_info.ver = adev->gfx.mec_fw_version; 324 fw_info.ver = adev->gfx.mec_fw_version;
325 else if (info->query_fw.index == 1) 325 fw_info.feature = adev->gfx.mec_feature_version;
326 } else if (info->query_fw.index == 1) {
326 fw_info.ver = adev->gfx.mec2_fw_version; 327 fw_info.ver = adev->gfx.mec2_fw_version;
327 else 328 fw_info.feature = adev->gfx.mec2_feature_version;
329 } else
328 return -EINVAL; 330 return -EINVAL;
329 fw_info.feature = 0;
330 break; 331 break;
331 case AMDGPU_INFO_FW_SMC: 332 case AMDGPU_INFO_FW_SMC:
332 fw_info.ver = adev->pm.fw_version; 333 fw_info.ver = adev->pm.fw_version;
@@ -336,7 +337,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
336 if (info->query_fw.index >= 2) 337 if (info->query_fw.index >= 2)
337 return -EINVAL; 338 return -EINVAL;
338 fw_info.ver = adev->sdma[info->query_fw.index].fw_version; 339 fw_info.ver = adev->sdma[info->query_fw.index].fw_version;
339 fw_info.feature = 0; 340 fw_info.feature = adev->sdma[info->query_fw.index].feature_version;
340 break; 341 break;
341 default: 342 default:
342 return -EINVAL; 343 return -EINVAL;
@@ -416,7 +417,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
416 return n ? -EFAULT : 0; 417 return n ? -EFAULT : 0;
417 } 418 }
418 case AMDGPU_INFO_DEV_INFO: { 419 case AMDGPU_INFO_DEV_INFO: {
419 struct drm_amdgpu_info_device dev_info; 420 struct drm_amdgpu_info_device dev_info = {};
420 struct amdgpu_cu_info cu_info; 421 struct amdgpu_cu_info cu_info;
421 422
422 dev_info.device_id = dev->pdev->device; 423 dev_info.device_id = dev->pdev->device;
@@ -459,6 +460,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
459 memcpy(&dev_info.cu_bitmap[0], &cu_info.bitmap[0], sizeof(cu_info.bitmap)); 460 memcpy(&dev_info.cu_bitmap[0], &cu_info.bitmap[0], sizeof(cu_info.bitmap));
460 dev_info.vram_type = adev->mc.vram_type; 461 dev_info.vram_type = adev->mc.vram_type;
461 dev_info.vram_bit_width = adev->mc.vram_width; 462 dev_info.vram_bit_width = adev->mc.vram_width;
463 dev_info.vce_harvest_config = adev->vce.harvest_config;
462 464
463 return copy_to_user(out, &dev_info, 465 return copy_to_user(out, &dev_info,
464 min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0; 466 min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
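
Besides reporting the new feature versions, the ioctl hunk declares `struct drm_amdgpu_info_device dev_info = {};`, presumably so that any field the handler does not fill is zero rather than stale stack data before the structure is copied out to userspace. A tiny illustration of zero-initializing an output structure before partially filling it; the struct here is invented:

#include <stdio.h>

struct info {
	unsigned int device_id;
	unsigned int reserved;        /* a field the handler never writes */
	unsigned long long vram_size; /* another field filled only sometimes */
};

static void fill_partially(struct info *out)
{
	out->device_id = 0x1234;      /* reserved and vram_size left untouched */
}

int main(void)
{
	/* Zero-initialize first, then fill: every named field the handler
	 * skips stays 0 instead of holding whatever was on the stack. */
	struct info out = {0};

	fill_partially(&out);
	printf("device_id=0x%x reserved=%u vram_size=%llu\n",
	       out.device_id, out.reserved, out.vram_size);
	return 0;
}
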
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 2f7a5efa21c2..f5c22556ec2c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -374,7 +374,7 @@ static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
374 unsigned height_in_mb = ALIGN(height / 16, 2); 374 unsigned height_in_mb = ALIGN(height / 16, 2);
375 unsigned fs_in_mb = width_in_mb * height_in_mb; 375 unsigned fs_in_mb = width_in_mb * height_in_mb;
376 376
377 unsigned image_size, tmp, min_dpb_size, num_dpb_buffer; 377 unsigned image_size, tmp, min_dpb_size, num_dpb_buffer, min_ctx_size;
378 378
379 image_size = width * height; 379 image_size = width * height;
380 image_size += image_size / 2; 380 image_size += image_size / 2;
@@ -466,6 +466,8 @@ static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
466 466
467 num_dpb_buffer = (le32_to_cpu(msg[59]) & 0xff) + 2; 467 num_dpb_buffer = (le32_to_cpu(msg[59]) & 0xff) + 2;
468 min_dpb_size = image_size * num_dpb_buffer; 468 min_dpb_size = image_size * num_dpb_buffer;
469 min_ctx_size = ((width + 255) / 16) * ((height + 255) / 16)
470 * 16 * num_dpb_buffer + 52 * 1024;
469 break; 471 break;
470 472
471 default: 473 default:
@@ -486,6 +488,7 @@ static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
486 488
487 buf_sizes[0x1] = dpb_size; 489 buf_sizes[0x1] = dpb_size;
488 buf_sizes[0x2] = image_size; 490 buf_sizes[0x2] = image_size;
491 buf_sizes[0x4] = min_ctx_size;
489 return 0; 492 return 0;
490} 493}
491 494
@@ -628,6 +631,13 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
628 return -EINVAL; 631 return -EINVAL;
629 } 632 }
630 633
634 } else if (cmd == 0x206) {
635 if ((end - start) < ctx->buf_sizes[4]) {
636 DRM_ERROR("buffer (%d) to small (%d / %d)!\n", cmd,
637 (unsigned)(end - start),
638 ctx->buf_sizes[4]);
639 return -EINVAL;
640 }
631 } else if ((cmd != 0x100) && (cmd != 0x204)) { 641 } else if ((cmd != 0x100) && (cmd != 0x204)) {
632 DRM_ERROR("invalid UVD command %X!\n", cmd); 642 DRM_ERROR("invalid UVD command %X!\n", cmd);
633 return -EINVAL; 643 return -EINVAL;
@@ -755,9 +765,10 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
755 struct amdgpu_uvd_cs_ctx ctx = {}; 765 struct amdgpu_uvd_cs_ctx ctx = {};
756 unsigned buf_sizes[] = { 766 unsigned buf_sizes[] = {
757 [0x00000000] = 2048, 767 [0x00000000] = 2048,
758 [0x00000001] = 32 * 1024 * 1024, 768 [0x00000001] = 0xFFFFFFFF,
759 [0x00000002] = 2048 * 1152 * 3, 769 [0x00000002] = 0xFFFFFFFF,
760 [0x00000003] = 2048, 770 [0x00000003] = 2048,
771 [0x00000004] = 0xFFFFFFFF,
761 }; 772 };
762 struct amdgpu_ib *ib = &parser->ibs[ib_idx]; 773 struct amdgpu_ib *ib = &parser->ibs[ib_idx];
763 int r; 774 int r;
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index ab83cc1ca4cc..15df46c93f0a 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -500,6 +500,7 @@ static int cik_sdma_load_microcode(struct amdgpu_device *adev)
500 amdgpu_ucode_print_sdma_hdr(&hdr->header); 500 amdgpu_ucode_print_sdma_hdr(&hdr->header);
501 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; 501 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
502 adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version); 502 adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
503 adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
503 fw_data = (const __le32 *) 504 fw_data = (const __le32 *)
504 (adev->sdma[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 505 (adev->sdma[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
505 WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0); 506 WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
index f75a31df30bd..ace870afc7d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
@@ -494,29 +494,67 @@ static void cz_dpm_fini(struct amdgpu_device *adev)
494 amdgpu_free_extended_power_table(adev); 494 amdgpu_free_extended_power_table(adev);
495} 495}
496 496
497#define ixSMUSVI_NB_CURRENTVID 0xD8230044
498#define CURRENT_NB_VID_MASK 0xff000000
499#define CURRENT_NB_VID__SHIFT 24
500#define ixSMUSVI_GFX_CURRENTVID 0xD8230048
501#define CURRENT_GFX_VID_MASK 0xff000000
502#define CURRENT_GFX_VID__SHIFT 24
503
497static void 504static void
498cz_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev, 505cz_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
499 struct seq_file *m) 506 struct seq_file *m)
500{ 507{
508 struct cz_power_info *pi = cz_get_pi(adev);
501 struct amdgpu_clock_voltage_dependency_table *table = 509 struct amdgpu_clock_voltage_dependency_table *table =
502 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 510 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
503 u32 current_index = 511 struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table =
504 (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) & 512 &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
505 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >> 513 struct amdgpu_vce_clock_voltage_dependency_table *vce_table =
506 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT; 514 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
507 u32 sclk, tmp; 515 u32 sclk_index = REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX),
508 u16 vddc; 516 TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX);
509 517 u32 uvd_index = REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
510 if (current_index >= NUM_SCLK_LEVELS) { 518 TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_UVD_INDEX);
511 seq_printf(m, "invalid dpm profile %d\n", current_index); 519 u32 vce_index = REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
520 TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_VCE_INDEX);
521 u32 sclk, vclk, dclk, ecclk, tmp;
522 u16 vddnb, vddgfx;
523
524 if (sclk_index >= NUM_SCLK_LEVELS) {
525 seq_printf(m, "invalid sclk dpm profile %d\n", sclk_index);
512 } else { 526 } else {
513 sclk = table->entries[current_index].clk; 527 sclk = table->entries[sclk_index].clk;
514 tmp = (RREG32_SMC(ixSMU_VOLTAGE_STATUS) & 528 seq_printf(m, "%u sclk: %u\n", sclk_index, sclk);
515 SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL_MASK) >> 529 }
516 SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL__SHIFT; 530
517 vddc = cz_convert_8bit_index_to_voltage(adev, (u16)tmp); 531 tmp = (RREG32_SMC(ixSMUSVI_NB_CURRENTVID) &
518 seq_printf(m, "power level %d sclk: %u vddc: %u\n", 532 CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT;
519 current_index, sclk, vddc); 533 vddnb = cz_convert_8bit_index_to_voltage(adev, (u16)tmp);
534 tmp = (RREG32_SMC(ixSMUSVI_GFX_CURRENTVID) &
535 CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT;
536 vddgfx = cz_convert_8bit_index_to_voltage(adev, (u16)tmp);
537 seq_printf(m, "vddnb: %u vddgfx: %u\n", vddnb, vddgfx);
538
539 seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en");
540 if (!pi->uvd_power_gated) {
541 if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
542 seq_printf(m, "invalid uvd dpm level %d\n", uvd_index);
543 } else {
544 vclk = uvd_table->entries[uvd_index].vclk;
545 dclk = uvd_table->entries[uvd_index].dclk;
546 seq_printf(m, "%u uvd vclk: %u dclk: %u\n", uvd_index, vclk, dclk);
547 }
548 }
549
550 seq_printf(m, "vce %sabled\n", pi->vce_power_gated ? "dis" : "en");
551 if (!pi->vce_power_gated) {
552 if (vce_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
553 seq_printf(m, "invalid vce dpm level %d\n", vce_index);
554 } else {
555 ecclk = vce_table->entries[vce_index].ecclk;
556 seq_printf(m, "%u vce ecclk: %u\n", vce_index, ecclk);
557 }
520 } 558 }
521} 559}
522 560
@@ -1679,25 +1717,31 @@ static int cz_dpm_unforce_dpm_levels(struct amdgpu_device *adev)
1679 if (ret) 1717 if (ret)
1680 return ret; 1718 return ret;
1681 1719
1682 DRM_INFO("DPM unforce state min=%d, max=%d.\n", 1720 DRM_DEBUG("DPM unforce state min=%d, max=%d.\n",
1683 pi->sclk_dpm.soft_min_clk, 1721 pi->sclk_dpm.soft_min_clk,
1684 pi->sclk_dpm.soft_max_clk); 1722 pi->sclk_dpm.soft_max_clk);
1685 1723
1686 return 0; 1724 return 0;
1687} 1725}
1688 1726
1689static int cz_dpm_force_dpm_level(struct amdgpu_device *adev, 1727static int cz_dpm_force_dpm_level(struct amdgpu_device *adev,
1690 enum amdgpu_dpm_forced_level level) 1728 enum amdgpu_dpm_forced_level level)
1691{ 1729{
1692 int ret = 0; 1730 int ret = 0;
1693 1731
1694 switch (level) { 1732 switch (level) {
1695 case AMDGPU_DPM_FORCED_LEVEL_HIGH: 1733 case AMDGPU_DPM_FORCED_LEVEL_HIGH:
1734 ret = cz_dpm_unforce_dpm_levels(adev);
1735 if (ret)
1736 return ret;
1696 ret = cz_dpm_force_highest(adev); 1737 ret = cz_dpm_force_highest(adev);
1697 if (ret) 1738 if (ret)
1698 return ret; 1739 return ret;
1699 break; 1740 break;
1700 case AMDGPU_DPM_FORCED_LEVEL_LOW: 1741 case AMDGPU_DPM_FORCED_LEVEL_LOW:
1742 ret = cz_dpm_unforce_dpm_levels(adev);
1743 if (ret)
1744 return ret;
1701 ret = cz_dpm_force_lowest(adev); 1745 ret = cz_dpm_force_lowest(adev);
1702 if (ret) 1746 if (ret)
1703 return ret; 1747 return ret;
@@ -1711,6 +1755,8 @@ static int cz_dpm_force_dpm_level(struct amdgpu_device *adev,
1711 break; 1755 break;
1712 } 1756 }
1713 1757
1758 adev->pm.dpm.forced_level = level;
1759
1714 return ret; 1760 return ret;
1715} 1761}
1716 1762
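
The reworked cz_dpm debugfs output reads the current voltage IDs with the usual mask-and-shift idiom, e.g. masking SMUSVI_NB_CURRENTVID with 0xff000000 and shifting down by 24. A short, self-contained example of that field extraction on a fake register value:

#include <stdint.h>
#include <stdio.h>

#define CURRENT_NB_VID_MASK   0xff000000u
#define CURRENT_NB_VID_SHIFT  24

/* Mask out the field, then shift it down to bit 0. */
static uint32_t get_field(uint32_t reg, uint32_t mask, unsigned int shift)
{
	return (reg & mask) >> shift;
}

int main(void)
{
	uint32_t fake_reg = 0x7b001234;     /* top byte holds the voltage ID */

	printf("vid = 0x%02x\n",
	       (unsigned int)get_field(fake_reg, CURRENT_NB_VID_MASK,
				       CURRENT_NB_VID_SHIFT));
	return 0;
}
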
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 6e77964f1b64..e70a26f587a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2632,6 +2632,7 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2632 struct drm_device *dev = crtc->dev; 2632 struct drm_device *dev = crtc->dev;
2633 struct amdgpu_device *adev = dev->dev_private; 2633 struct amdgpu_device *adev = dev->dev_private;
2634 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2634 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2635 unsigned type;
2635 2636
2636 switch (mode) { 2637 switch (mode) {
2637 case DRM_MODE_DPMS_ON: 2638 case DRM_MODE_DPMS_ON:
@@ -2640,6 +2641,9 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2640 dce_v10_0_vga_enable(crtc, true); 2641 dce_v10_0_vga_enable(crtc, true);
2641 amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); 2642 amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
2642 dce_v10_0_vga_enable(crtc, false); 2643 dce_v10_0_vga_enable(crtc, false);
2644 /* Make sure VBLANK interrupt is still enabled */
2645 type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
2646 amdgpu_irq_update(adev, &adev->crtc_irq, type);
2643 drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id); 2647 drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
2644 dce_v10_0_crtc_load_lut(crtc); 2648 dce_v10_0_crtc_load_lut(crtc);
2645 break; 2649 break;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 7f7abb0e0be5..dcb402ee048a 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -2631,6 +2631,7 @@ static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2631 struct drm_device *dev = crtc->dev; 2631 struct drm_device *dev = crtc->dev;
2632 struct amdgpu_device *adev = dev->dev_private; 2632 struct amdgpu_device *adev = dev->dev_private;
2633 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2633 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2634 unsigned type;
2634 2635
2635 switch (mode) { 2636 switch (mode) {
2636 case DRM_MODE_DPMS_ON: 2637 case DRM_MODE_DPMS_ON:
@@ -2639,6 +2640,9 @@ static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2639 dce_v11_0_vga_enable(crtc, true); 2640 dce_v11_0_vga_enable(crtc, true);
2640 amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); 2641 amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
2641 dce_v11_0_vga_enable(crtc, false); 2642 dce_v11_0_vga_enable(crtc, false);
2643 /* Make sure VBLANK interrupt is still enabled */
2644 type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
2645 amdgpu_irq_update(adev, &adev->crtc_irq, type);
2642 drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id); 2646 drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
2643 dce_v11_0_crtc_load_lut(crtc); 2647 dce_v11_0_crtc_load_lut(crtc);
2644 break; 2648 break;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 08387dfd98a7..cc050a329c49 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -2566,6 +2566,7 @@ static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2566 struct drm_device *dev = crtc->dev; 2566 struct drm_device *dev = crtc->dev;
2567 struct amdgpu_device *adev = dev->dev_private; 2567 struct amdgpu_device *adev = dev->dev_private;
2568 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2568 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2569 unsigned type;
2569 2570
2570 switch (mode) { 2571 switch (mode) {
2571 case DRM_MODE_DPMS_ON: 2572 case DRM_MODE_DPMS_ON:
@@ -2574,6 +2575,9 @@ static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2574 dce_v8_0_vga_enable(crtc, true); 2575 dce_v8_0_vga_enable(crtc, true);
2575 amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); 2576 amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
2576 dce_v8_0_vga_enable(crtc, false); 2577 dce_v8_0_vga_enable(crtc, false);
2578 /* Make sure VBLANK interrupt is still enabled */
2579 type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
2580 amdgpu_irq_update(adev, &adev->crtc_irq, type);
2577 drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id); 2581 drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
2578 dce_v8_0_crtc_load_lut(crtc); 2582 dce_v8_0_crtc_load_lut(crtc);
2579 break; 2583 break;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 2c188fb9fd22..0d8bf2cb1956 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -2561,7 +2561,7 @@ static bool gfx_v7_0_ring_emit_semaphore(struct amdgpu_ring *ring,
2561 * sheduling on the ring. This function schedules the IB 2561 * sheduling on the ring. This function schedules the IB
2562 * on the gfx ring for execution by the GPU. 2562 * on the gfx ring for execution by the GPU.
2563 */ 2563 */
2564static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring, 2564static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
2565 struct amdgpu_ib *ib) 2565 struct amdgpu_ib *ib)
2566{ 2566{
2567 bool need_ctx_switch = ring->current_ctx != ib->ctx; 2567 bool need_ctx_switch = ring->current_ctx != ib->ctx;
@@ -2569,15 +2569,10 @@ static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
2569 u32 next_rptr = ring->wptr + 5; 2569 u32 next_rptr = ring->wptr + 5;
2570 2570
2571 /* drop the CE preamble IB for the same context */ 2571 /* drop the CE preamble IB for the same context */
2572 if ((ring->type == AMDGPU_RING_TYPE_GFX) && 2572 if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && !need_ctx_switch)
2573 (ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
2574 !need_ctx_switch)
2575 return; 2573 return;
2576 2574
2577 if (ring->type == AMDGPU_RING_TYPE_COMPUTE) 2575 if (need_ctx_switch)
2578 control |= INDIRECT_BUFFER_VALID;
2579
2580 if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX)
2581 next_rptr += 2; 2576 next_rptr += 2;
2582 2577
2583 next_rptr += 4; 2578 next_rptr += 4;
@@ -2588,7 +2583,7 @@ static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
2588 amdgpu_ring_write(ring, next_rptr); 2583 amdgpu_ring_write(ring, next_rptr);
2589 2584
2590 /* insert SWITCH_BUFFER packet before first IB in the ring frame */ 2585 /* insert SWITCH_BUFFER packet before first IB in the ring frame */
2591 if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX) { 2586 if (need_ctx_switch) {
2592 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); 2587 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
2593 amdgpu_ring_write(ring, 0); 2588 amdgpu_ring_write(ring, 0);
2594 } 2589 }
@@ -2611,6 +2606,35 @@ static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
2611 amdgpu_ring_write(ring, control); 2606 amdgpu_ring_write(ring, control);
2612} 2607}
2613 2608
2609static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
2610 struct amdgpu_ib *ib)
2611{
2612 u32 header, control = 0;
2613 u32 next_rptr = ring->wptr + 5;
2614
2615 control |= INDIRECT_BUFFER_VALID;
2616 next_rptr += 4;
2617 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
2618 amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
2619 amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
2620 amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
2621 amdgpu_ring_write(ring, next_rptr);
2622
2623 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
2624
2625 control |= ib->length_dw |
2626 (ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0);
2627
2628 amdgpu_ring_write(ring, header);
2629 amdgpu_ring_write(ring,
2630#ifdef __BIG_ENDIAN
2631 (2 << 0) |
2632#endif
2633 (ib->gpu_addr & 0xFFFFFFFC));
2634 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
2635 amdgpu_ring_write(ring, control);
2636}
2637
2614/** 2638/**
2615 * gfx_v7_0_ring_test_ib - basic ring IB test 2639 * gfx_v7_0_ring_test_ib - basic ring IB test
2616 * 2640 *
@@ -3056,6 +3080,8 @@ static int gfx_v7_0_cp_compute_load_microcode(struct amdgpu_device *adev)
3056 mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; 3080 mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
3057 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header); 3081 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
3058 adev->gfx.mec_fw_version = le32_to_cpu(mec_hdr->header.ucode_version); 3082 adev->gfx.mec_fw_version = le32_to_cpu(mec_hdr->header.ucode_version);
3083 adev->gfx.mec_feature_version = le32_to_cpu(
3084 mec_hdr->ucode_feature_version);
3059 3085
3060 gfx_v7_0_cp_compute_enable(adev, false); 3086 gfx_v7_0_cp_compute_enable(adev, false);
3061 3087
@@ -3078,6 +3104,8 @@ static int gfx_v7_0_cp_compute_load_microcode(struct amdgpu_device *adev)
3078 mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data; 3104 mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
3079 amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header); 3105 amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header);
3080 adev->gfx.mec2_fw_version = le32_to_cpu(mec2_hdr->header.ucode_version); 3106 adev->gfx.mec2_fw_version = le32_to_cpu(mec2_hdr->header.ucode_version);
3107 adev->gfx.mec2_feature_version = le32_to_cpu(
3108 mec2_hdr->ucode_feature_version);
3081 3109
3082 /* MEC2 */ 3110 /* MEC2 */
3083 fw_data = (const __le32 *) 3111 fw_data = (const __le32 *)
@@ -4042,6 +4070,8 @@ static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev)
4042 hdr = (const struct rlc_firmware_header_v1_0 *)adev->gfx.rlc_fw->data; 4070 hdr = (const struct rlc_firmware_header_v1_0 *)adev->gfx.rlc_fw->data;
4043 amdgpu_ucode_print_rlc_hdr(&hdr->header); 4071 amdgpu_ucode_print_rlc_hdr(&hdr->header);
4044 adev->gfx.rlc_fw_version = le32_to_cpu(hdr->header.ucode_version); 4072 adev->gfx.rlc_fw_version = le32_to_cpu(hdr->header.ucode_version);
4073 adev->gfx.rlc_feature_version = le32_to_cpu(
4074 hdr->ucode_feature_version);
4045 4075
4046 gfx_v7_0_rlc_stop(adev); 4076 gfx_v7_0_rlc_stop(adev);
4047 4077
@@ -5098,7 +5128,7 @@ static void gfx_v7_0_print_status(void *handle)
5098 dev_info(adev->dev, " CP_HPD_EOP_CONTROL=0x%08X\n", 5128 dev_info(adev->dev, " CP_HPD_EOP_CONTROL=0x%08X\n",
5099 RREG32(mmCP_HPD_EOP_CONTROL)); 5129 RREG32(mmCP_HPD_EOP_CONTROL));
5100 5130
5101 for (queue = 0; queue < 8; i++) { 5131 for (queue = 0; queue < 8; queue++) {
5102 cik_srbm_select(adev, me, pipe, queue, 0); 5132 cik_srbm_select(adev, me, pipe, queue, 0);
5103 dev_info(adev->dev, " queue: %d\n", queue); 5133 dev_info(adev->dev, " queue: %d\n", queue);
5104 dev_info(adev->dev, " CP_PQ_WPTR_POLL_CNTL=0x%08X\n", 5134 dev_info(adev->dev, " CP_PQ_WPTR_POLL_CNTL=0x%08X\n",
@@ -5555,7 +5585,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
5555 .get_wptr = gfx_v7_0_ring_get_wptr_gfx, 5585 .get_wptr = gfx_v7_0_ring_get_wptr_gfx,
5556 .set_wptr = gfx_v7_0_ring_set_wptr_gfx, 5586 .set_wptr = gfx_v7_0_ring_set_wptr_gfx,
5557 .parse_cs = NULL, 5587 .parse_cs = NULL,
5558 .emit_ib = gfx_v7_0_ring_emit_ib, 5588 .emit_ib = gfx_v7_0_ring_emit_ib_gfx,
5559 .emit_fence = gfx_v7_0_ring_emit_fence_gfx, 5589 .emit_fence = gfx_v7_0_ring_emit_fence_gfx,
5560 .emit_semaphore = gfx_v7_0_ring_emit_semaphore, 5590 .emit_semaphore = gfx_v7_0_ring_emit_semaphore,
5561 .emit_vm_flush = gfx_v7_0_ring_emit_vm_flush, 5591 .emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
@@ -5571,7 +5601,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
5571 .get_wptr = gfx_v7_0_ring_get_wptr_compute, 5601 .get_wptr = gfx_v7_0_ring_get_wptr_compute,
5572 .set_wptr = gfx_v7_0_ring_set_wptr_compute, 5602 .set_wptr = gfx_v7_0_ring_set_wptr_compute,
5573 .parse_cs = NULL, 5603 .parse_cs = NULL,
5574 .emit_ib = gfx_v7_0_ring_emit_ib, 5604 .emit_ib = gfx_v7_0_ring_emit_ib_compute,
5575 .emit_fence = gfx_v7_0_ring_emit_fence_compute, 5605 .emit_fence = gfx_v7_0_ring_emit_fence_compute,
5576 .emit_semaphore = gfx_v7_0_ring_emit_semaphore, 5606 .emit_semaphore = gfx_v7_0_ring_emit_semaphore,
5577 .emit_vm_flush = gfx_v7_0_ring_emit_vm_flush, 5607 .emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
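
The gfx_v7_0 hunks above split gfx_v7_0_ring_emit_ib() into a gfx variant (which keeps the CE-preamble drop and the SWITCH_BUFFER packet on context switches) and a compute variant (which always marks the IB INDIRECT_BUFFER_VALID and needs neither), so neither path has to test ring->type any more. The standalone sketch below only shows how the IB control and address dwords are packed, as in gfx_v7_0_ring_emit_ib_compute(): length in dwords, VM id shifted into bits 31:24, and a dword-aligned GPU address split into low/high words. The PACKET3 opcodes are left out and the sample values are made up.

    /*
     * Illustration of the dword packing done by gfx_v7_0_ring_emit_ib_compute()
     * above: the control word carries the IB length in dwords with the VM id in
     * bits 31:24; the dword-aligned GPU address is split into low/high words.
     * PACKET3()/INDIRECT_BUFFER_VALID encodings are omitted and the sample
     * values are made up.
     */
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t ib_control(uint32_t length_dw, unsigned vm_id)
    {
        return length_dw | ((uint32_t)vm_id << 24);
    }

    int main(void)
    {
        uint64_t gpu_addr = 0x0000001234567840ull;      /* hypothetical IB address */
        uint32_t lo = (uint32_t)gpu_addr & 0xFFFFFFFCu; /* dword-aligned low bits */
        uint32_t hi = (uint32_t)(gpu_addr >> 32) & 0xFFFFu;
        uint32_t control = ib_control(64, 3);           /* 64 dwords, VM id 3 */

        printf("addr lo 0x%08x hi 0x%08x control 0x%08x\n", lo, hi, control);
        return 0;
    }
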
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 7b683fb2173c..20e2cfd521d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -587,6 +587,7 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
587 int err; 587 int err;
588 struct amdgpu_firmware_info *info = NULL; 588 struct amdgpu_firmware_info *info = NULL;
589 const struct common_firmware_header *header = NULL; 589 const struct common_firmware_header *header = NULL;
590 const struct gfx_firmware_header_v1_0 *cp_hdr;
590 591
591 DRM_DEBUG("\n"); 592 DRM_DEBUG("\n");
592 593
@@ -611,6 +612,9 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
611 err = amdgpu_ucode_validate(adev->gfx.pfp_fw); 612 err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
612 if (err) 613 if (err)
613 goto out; 614 goto out;
615 cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
616 adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
617 adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
614 618
615 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name); 619 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
616 err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev); 620 err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
@@ -619,6 +623,9 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
619 err = amdgpu_ucode_validate(adev->gfx.me_fw); 623 err = amdgpu_ucode_validate(adev->gfx.me_fw);
620 if (err) 624 if (err)
621 goto out; 625 goto out;
626 cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
627 adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
628 adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
622 629
623 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name); 630 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
624 err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev); 631 err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
@@ -627,12 +634,18 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
627 err = amdgpu_ucode_validate(adev->gfx.ce_fw); 634 err = amdgpu_ucode_validate(adev->gfx.ce_fw);
628 if (err) 635 if (err)
629 goto out; 636 goto out;
637 cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
638 adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
639 adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
630 640
631 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name); 641 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
632 err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev); 642 err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
633 if (err) 643 if (err)
634 goto out; 644 goto out;
635 err = amdgpu_ucode_validate(adev->gfx.rlc_fw); 645 err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
646 cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.rlc_fw->data;
647 adev->gfx.rlc_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
648 adev->gfx.rlc_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
636 649
637 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name); 650 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
638 err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev); 651 err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
@@ -641,6 +654,9 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
641 err = amdgpu_ucode_validate(adev->gfx.mec_fw); 654 err = amdgpu_ucode_validate(adev->gfx.mec_fw);
642 if (err) 655 if (err)
643 goto out; 656 goto out;
657 cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
658 adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
659 adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
644 660
645 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name); 661 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
646 err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev); 662 err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
@@ -648,6 +664,12 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
648 err = amdgpu_ucode_validate(adev->gfx.mec2_fw); 664 err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
649 if (err) 665 if (err)
650 goto out; 666 goto out;
667 cp_hdr = (const struct gfx_firmware_header_v1_0 *)
668 adev->gfx.mec2_fw->data;
669 adev->gfx.mec2_fw_version = le32_to_cpu(
670 cp_hdr->header.ucode_version);
671 adev->gfx.mec2_feature_version = le32_to_cpu(
672 cp_hdr->ucode_feature_version);
651 } else { 673 } else {
652 err = 0; 674 err = 0;
653 adev->gfx.mec2_fw = NULL; 675 adev->gfx.mec2_fw = NULL;
@@ -1813,10 +1835,7 @@ static u32 gfx_v8_0_get_rb_disabled(struct amdgpu_device *adev,
1813 u32 data, mask; 1835 u32 data, mask;
1814 1836
1815 data = RREG32(mmCC_RB_BACKEND_DISABLE); 1837 data = RREG32(mmCC_RB_BACKEND_DISABLE);
1816 if (data & 1) 1838 data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
1817 data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
1818 else
1819 data = 0;
1820 1839
1821 data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE); 1840 data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE);
1822 1841
@@ -1986,6 +2005,7 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
1986 adev->gfx.config.max_shader_engines = 1; 2005 adev->gfx.config.max_shader_engines = 1;
1987 adev->gfx.config.max_tile_pipes = 2; 2006 adev->gfx.config.max_tile_pipes = 2;
1988 adev->gfx.config.max_sh_per_se = 1; 2007 adev->gfx.config.max_sh_per_se = 1;
2008 adev->gfx.config.max_backends_per_se = 2;
1989 2009
1990 switch (adev->pdev->revision) { 2010 switch (adev->pdev->revision) {
1991 case 0xc4: 2011 case 0xc4:
@@ -1994,7 +2014,6 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
1994 case 0xcc: 2014 case 0xcc:
1995 /* B10 */ 2015 /* B10 */
1996 adev->gfx.config.max_cu_per_sh = 8; 2016 adev->gfx.config.max_cu_per_sh = 8;
1997 adev->gfx.config.max_backends_per_se = 2;
1998 break; 2017 break;
1999 case 0xc5: 2018 case 0xc5:
2000 case 0x81: 2019 case 0x81:
@@ -2003,14 +2022,12 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
2003 case 0xcd: 2022 case 0xcd:
2004 /* B8 */ 2023 /* B8 */
2005 adev->gfx.config.max_cu_per_sh = 6; 2024 adev->gfx.config.max_cu_per_sh = 6;
2006 adev->gfx.config.max_backends_per_se = 2;
2007 break; 2025 break;
2008 case 0xc6: 2026 case 0xc6:
2009 case 0xca: 2027 case 0xca:
2010 case 0xce: 2028 case 0xce:
2011 /* B6 */ 2029 /* B6 */
2012 adev->gfx.config.max_cu_per_sh = 6; 2030 adev->gfx.config.max_cu_per_sh = 6;
2013 adev->gfx.config.max_backends_per_se = 2;
2014 break; 2031 break;
2015 case 0xc7: 2032 case 0xc7:
2016 case 0x87: 2033 case 0x87:
@@ -2018,7 +2035,6 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
2018 default: 2035 default:
2019 /* B4 */ 2036 /* B4 */
2020 adev->gfx.config.max_cu_per_sh = 4; 2037 adev->gfx.config.max_cu_per_sh = 4;
2021 adev->gfx.config.max_backends_per_se = 1;
2022 break; 2038 break;
2023 } 2039 }
2024 2040
@@ -2278,7 +2294,6 @@ static int gfx_v8_0_rlc_load_microcode(struct amdgpu_device *adev)
2278 2294
2279 hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; 2295 hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
2280 amdgpu_ucode_print_rlc_hdr(&hdr->header); 2296 amdgpu_ucode_print_rlc_hdr(&hdr->header);
2281 adev->gfx.rlc_fw_version = le32_to_cpu(hdr->header.ucode_version);
2282 2297
2283 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 2298 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
2284 le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 2299 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
@@ -2364,12 +2379,6 @@ static int gfx_v8_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
2364 amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header); 2379 amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
2365 amdgpu_ucode_print_gfx_hdr(&ce_hdr->header); 2380 amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
2366 amdgpu_ucode_print_gfx_hdr(&me_hdr->header); 2381 amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
2367 adev->gfx.pfp_fw_version = le32_to_cpu(pfp_hdr->header.ucode_version);
2368 adev->gfx.ce_fw_version = le32_to_cpu(ce_hdr->header.ucode_version);
2369 adev->gfx.me_fw_version = le32_to_cpu(me_hdr->header.ucode_version);
2370 adev->gfx.me_feature_version = le32_to_cpu(me_hdr->ucode_feature_version);
2371 adev->gfx.ce_feature_version = le32_to_cpu(ce_hdr->ucode_feature_version);
2372 adev->gfx.pfp_feature_version = le32_to_cpu(pfp_hdr->ucode_feature_version);
2373 2382
2374 gfx_v8_0_cp_gfx_enable(adev, false); 2383 gfx_v8_0_cp_gfx_enable(adev, false);
2375 2384
@@ -2625,7 +2634,6 @@ static int gfx_v8_0_cp_compute_load_microcode(struct amdgpu_device *adev)
2625 2634
2626 mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; 2635 mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
2627 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header); 2636 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
2628 adev->gfx.mec_fw_version = le32_to_cpu(mec_hdr->header.ucode_version);
2629 2637
2630 fw_data = (const __le32 *) 2638 fw_data = (const __le32 *)
2631 (adev->gfx.mec_fw->data + 2639 (adev->gfx.mec_fw->data +
@@ -2644,7 +2652,6 @@ static int gfx_v8_0_cp_compute_load_microcode(struct amdgpu_device *adev)
2644 2652
2645 mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data; 2653 mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
2646 amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header); 2654 amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header);
2647 adev->gfx.mec2_fw_version = le32_to_cpu(mec2_hdr->header.ucode_version);
2648 2655
2649 fw_data = (const __le32 *) 2656 fw_data = (const __le32 *)
2650 (adev->gfx.mec2_fw->data + 2657 (adev->gfx.mec2_fw->data +
@@ -3128,7 +3135,7 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev)
3128 WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER, 3135 WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER,
3129 AMDGPU_DOORBELL_KIQ << 2); 3136 AMDGPU_DOORBELL_KIQ << 2);
3130 WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER, 3137 WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER,
3131 0x7FFFF << 2); 3138 AMDGPU_DOORBELL_MEC_RING7 << 2);
3132 } 3139 }
3133 tmp = RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL); 3140 tmp = RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
3134 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 3141 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
@@ -3756,7 +3763,7 @@ static void gfx_v8_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
3756 amdgpu_ring_write(ring, 0x20); /* poll interval */ 3763 amdgpu_ring_write(ring, 0x20); /* poll interval */
3757} 3764}
3758 3765
3759static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring, 3766static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
3760 struct amdgpu_ib *ib) 3767 struct amdgpu_ib *ib)
3761{ 3768{
3762 bool need_ctx_switch = ring->current_ctx != ib->ctx; 3769 bool need_ctx_switch = ring->current_ctx != ib->ctx;
@@ -3764,15 +3771,10 @@ static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring,
3764 u32 next_rptr = ring->wptr + 5; 3771 u32 next_rptr = ring->wptr + 5;
3765 3772
3766 /* drop the CE preamble IB for the same context */ 3773 /* drop the CE preamble IB for the same context */
3767 if ((ring->type == AMDGPU_RING_TYPE_GFX) && 3774 if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && !need_ctx_switch)
3768 (ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
3769 !need_ctx_switch)
3770 return; 3775 return;
3771 3776
3772 if (ring->type == AMDGPU_RING_TYPE_COMPUTE) 3777 if (need_ctx_switch)
3773 control |= INDIRECT_BUFFER_VALID;
3774
3775 if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX)
3776 next_rptr += 2; 3778 next_rptr += 2;
3777 3779
3778 next_rptr += 4; 3780 next_rptr += 4;
@@ -3783,7 +3785,7 @@ static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring,
3783 amdgpu_ring_write(ring, next_rptr); 3785 amdgpu_ring_write(ring, next_rptr);
3784 3786
3785 /* insert SWITCH_BUFFER packet before first IB in the ring frame */ 3787 /* insert SWITCH_BUFFER packet before first IB in the ring frame */
3786 if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX) { 3788 if (need_ctx_switch) {
3787 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); 3789 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
3788 amdgpu_ring_write(ring, 0); 3790 amdgpu_ring_write(ring, 0);
3789 } 3791 }
@@ -3806,6 +3808,36 @@ static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring,
3806 amdgpu_ring_write(ring, control); 3808 amdgpu_ring_write(ring, control);
3807} 3809}
3808 3810
3811static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
3812 struct amdgpu_ib *ib)
3813{
3814 u32 header, control = 0;
3815 u32 next_rptr = ring->wptr + 5;
3816
3817 control |= INDIRECT_BUFFER_VALID;
3818
3819 next_rptr += 4;
3820 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3821 amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
3822 amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
3823 amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
3824 amdgpu_ring_write(ring, next_rptr);
3825
3826 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
3827
3828 control |= ib->length_dw |
3829 (ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0);
3830
3831 amdgpu_ring_write(ring, header);
3832 amdgpu_ring_write(ring,
3833#ifdef __BIG_ENDIAN
3834 (2 << 0) |
3835#endif
3836 (ib->gpu_addr & 0xFFFFFFFC));
3837 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
3838 amdgpu_ring_write(ring, control);
3839}
3840
3809static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr, 3841static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
3810 u64 seq, unsigned flags) 3842 u64 seq, unsigned flags)
3811{ 3843{
@@ -4227,7 +4259,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
4227 .get_wptr = gfx_v8_0_ring_get_wptr_gfx, 4259 .get_wptr = gfx_v8_0_ring_get_wptr_gfx,
4228 .set_wptr = gfx_v8_0_ring_set_wptr_gfx, 4260 .set_wptr = gfx_v8_0_ring_set_wptr_gfx,
4229 .parse_cs = NULL, 4261 .parse_cs = NULL,
4230 .emit_ib = gfx_v8_0_ring_emit_ib, 4262 .emit_ib = gfx_v8_0_ring_emit_ib_gfx,
4231 .emit_fence = gfx_v8_0_ring_emit_fence_gfx, 4263 .emit_fence = gfx_v8_0_ring_emit_fence_gfx,
4232 .emit_semaphore = gfx_v8_0_ring_emit_semaphore, 4264 .emit_semaphore = gfx_v8_0_ring_emit_semaphore,
4233 .emit_vm_flush = gfx_v8_0_ring_emit_vm_flush, 4265 .emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
@@ -4243,7 +4275,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
4243 .get_wptr = gfx_v8_0_ring_get_wptr_compute, 4275 .get_wptr = gfx_v8_0_ring_get_wptr_compute,
4244 .set_wptr = gfx_v8_0_ring_set_wptr_compute, 4276 .set_wptr = gfx_v8_0_ring_set_wptr_compute,
4245 .parse_cs = NULL, 4277 .parse_cs = NULL,
4246 .emit_ib = gfx_v8_0_ring_emit_ib, 4278 .emit_ib = gfx_v8_0_ring_emit_ib_compute,
4247 .emit_fence = gfx_v8_0_ring_emit_fence_compute, 4279 .emit_fence = gfx_v8_0_ring_emit_fence_compute,
4248 .emit_semaphore = gfx_v8_0_ring_emit_semaphore, 4280 .emit_semaphore = gfx_v8_0_ring_emit_semaphore,
4249 .emit_vm_flush = gfx_v8_0_ring_emit_vm_flush, 4281 .emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
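
The gfx_v8_0 microcode changes follow the same pattern: the ucode and feature versions for PFP, ME, CE, RLC, MEC and MEC2 are now read from the firmware headers in gfx_v8_0_init_microcode(), right after each blob is validated, and the duplicate reads are dropped from the RLC/CP load paths, so the values are populated even on systems where the SMU rather than the driver uploads the firmware. A minimal, portable sketch of pulling little-endian version fields out of a firmware blob (the real gfx_firmware_header_v1_0 has more fields and different offsets; the byte layout here is invented):

    /*
     * Decode little-endian 32-bit fields from a firmware blob regardless of
     * host endianness -- the userspace analogue of le32_to_cpu() on a header
     * field.  The field offsets and values below are made up for the demo.
     */
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t le32(const unsigned char *p)
    {
        return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
               ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
    }

    int main(void)
    {
        /* pretend this is the start of a blob returned by request_firmware() */
        const unsigned char blob[] = {
            0x2c, 0x01, 0x00, 0x00,   /* "ucode_version"         = 300 */
            0x23, 0x00, 0x00, 0x00,   /* "ucode_feature_version" =  35 */
        };
        uint32_t fw_version = le32(blob);
        uint32_t feature_version = le32(blob + 4);

        printf("fw_version %u feature_version %u\n", fw_version, feature_version);
        return 0;
    }
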
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index d7895885fe0c..a988dfb1d394 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -121,6 +121,7 @@ static int sdma_v2_4_init_microcode(struct amdgpu_device *adev)
121 int err, i; 121 int err, i;
122 struct amdgpu_firmware_info *info = NULL; 122 struct amdgpu_firmware_info *info = NULL;
123 const struct common_firmware_header *header = NULL; 123 const struct common_firmware_header *header = NULL;
124 const struct sdma_firmware_header_v1_0 *hdr;
124 125
125 DRM_DEBUG("\n"); 126 DRM_DEBUG("\n");
126 127
@@ -142,6 +143,9 @@ static int sdma_v2_4_init_microcode(struct amdgpu_device *adev)
142 err = amdgpu_ucode_validate(adev->sdma[i].fw); 143 err = amdgpu_ucode_validate(adev->sdma[i].fw);
143 if (err) 144 if (err)
144 goto out; 145 goto out;
146 hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
147 adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
148 adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
145 149
146 if (adev->firmware.smu_load) { 150 if (adev->firmware.smu_load) {
147 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i]; 151 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
@@ -541,8 +545,6 @@ static int sdma_v2_4_load_microcode(struct amdgpu_device *adev)
541 hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data; 545 hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
542 amdgpu_ucode_print_sdma_hdr(&hdr->header); 546 amdgpu_ucode_print_sdma_hdr(&hdr->header);
543 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; 547 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
544 adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
545
546 fw_data = (const __le32 *) 548 fw_data = (const __le32 *)
547 (adev->sdma[i].fw->data + 549 (adev->sdma[i].fw->data +
548 le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 550 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 7bb37b93993f..2b86569b18d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -159,6 +159,7 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
159 int err, i; 159 int err, i;
160 struct amdgpu_firmware_info *info = NULL; 160 struct amdgpu_firmware_info *info = NULL;
161 const struct common_firmware_header *header = NULL; 161 const struct common_firmware_header *header = NULL;
162 const struct sdma_firmware_header_v1_0 *hdr;
162 163
163 DRM_DEBUG("\n"); 164 DRM_DEBUG("\n");
164 165
@@ -183,6 +184,9 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
183 err = amdgpu_ucode_validate(adev->sdma[i].fw); 184 err = amdgpu_ucode_validate(adev->sdma[i].fw);
184 if (err) 185 if (err)
185 goto out; 186 goto out;
187 hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
188 adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
189 adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
186 190
187 if (adev->firmware.smu_load) { 191 if (adev->firmware.smu_load) {
188 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i]; 192 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
@@ -630,8 +634,6 @@ static int sdma_v3_0_load_microcode(struct amdgpu_device *adev)
630 hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data; 634 hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
631 amdgpu_ucode_print_sdma_hdr(&hdr->header); 635 amdgpu_ucode_print_sdma_hdr(&hdr->header);
632 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; 636 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
633 adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
634
635 fw_data = (const __le32 *) 637 fw_data = (const __le32 *)
636 (adev->sdma[i].fw->data + 638 (adev->sdma[i].fw->data +
637 le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 639 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index d62c4002e39c..d1064ca3670e 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -35,6 +35,8 @@
35#include "oss/oss_2_0_d.h" 35#include "oss/oss_2_0_d.h"
36#include "oss/oss_2_0_sh_mask.h" 36#include "oss/oss_2_0_sh_mask.h"
37#include "gca/gfx_8_0_d.h" 37#include "gca/gfx_8_0_d.h"
38#include "smu/smu_7_1_2_d.h"
39#include "smu/smu_7_1_2_sh_mask.h"
38 40
39#define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04 41#define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04
40#define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10 42#define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10
@@ -112,6 +114,10 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
112 114
113 mutex_lock(&adev->grbm_idx_mutex); 115 mutex_lock(&adev->grbm_idx_mutex);
114 for (idx = 0; idx < 2; ++idx) { 116 for (idx = 0; idx < 2; ++idx) {
117
118 if (adev->vce.harvest_config & (1 << idx))
119 continue;
120
115 if(idx == 0) 121 if(idx == 0)
116 WREG32_P(mmGRBM_GFX_INDEX, 0, 122 WREG32_P(mmGRBM_GFX_INDEX, 0,
117 ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); 123 ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
@@ -190,10 +196,52 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
190 return 0; 196 return 0;
191} 197}
192 198
199#define ixVCE_HARVEST_FUSE_MACRO__ADDRESS 0xC0014074
200#define VCE_HARVEST_FUSE_MACRO__SHIFT 27
201#define VCE_HARVEST_FUSE_MACRO__MASK 0x18000000
202
203static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
204{
205 u32 tmp;
206 unsigned ret;
207
208 if (adev->flags & AMDGPU_IS_APU)
209 tmp = (RREG32_SMC(ixVCE_HARVEST_FUSE_MACRO__ADDRESS) &
210 VCE_HARVEST_FUSE_MACRO__MASK) >>
211 VCE_HARVEST_FUSE_MACRO__SHIFT;
212 else
213 tmp = (RREG32_SMC(ixCC_HARVEST_FUSES) &
214 CC_HARVEST_FUSES__VCE_DISABLE_MASK) >>
215 CC_HARVEST_FUSES__VCE_DISABLE__SHIFT;
216
217 switch (tmp) {
218 case 1:
219 ret = AMDGPU_VCE_HARVEST_VCE0;
220 break;
221 case 2:
222 ret = AMDGPU_VCE_HARVEST_VCE1;
223 break;
224 case 3:
225 ret = AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1;
226 break;
227 default:
228 ret = 0;
229 }
230
231 return ret;
232}
233
193static int vce_v3_0_early_init(void *handle) 234static int vce_v3_0_early_init(void *handle)
194{ 235{
195 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 236 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
196 237
238 adev->vce.harvest_config = vce_v3_0_get_harvest_config(adev);
239
240 if ((adev->vce.harvest_config &
241 (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1)) ==
242 (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1))
243 return -ENOENT;
244
197 vce_v3_0_set_ring_funcs(adev); 245 vce_v3_0_set_ring_funcs(adev);
198 vce_v3_0_set_irq_funcs(adev); 246 vce_v3_0_set_irq_funcs(adev);
199 247
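
vce_v3_0 now reads a two-bit harvest fuse at early-init time and records which VCE instances are fused off; vce_v3_0_start() skips harvested instances and early init bails out with -ENOENT when both are gone. The decode is a plain mask/shift/switch and is easy to model in isolation; the fuse register value and the harvest flag values below are assumptions for the demo, while the mask, shift and the 1/2/3 case mapping come from the hunk above.

    /*
     * Userspace model of vce_v3_0_get_harvest_config(): a two-bit fuse field
     * (mask 0x18000000, shift 27) selects which VCE instances are harvested.
     * On real hardware the fuse value comes from an SMC register read; the
     * flag bit values are assumed for this demo.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define VCE_HARVEST_FUSE_MACRO__MASK  0x18000000u
    #define VCE_HARVEST_FUSE_MACRO__SHIFT 27

    #define AMDGPU_VCE_HARVEST_VCE0 (1u << 0)   /* flag values assumed */
    #define AMDGPU_VCE_HARVEST_VCE1 (1u << 1)

    static unsigned decode_harvest(uint32_t fuse_reg)
    {
        switch ((fuse_reg & VCE_HARVEST_FUSE_MACRO__MASK) >>
                VCE_HARVEST_FUSE_MACRO__SHIFT) {
        case 1:
            return AMDGPU_VCE_HARVEST_VCE0;
        case 2:
            return AMDGPU_VCE_HARVEST_VCE1;
        case 3:
            return AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1;
        default:
            return 0;
        }
    }

    int main(void)
    {
        uint32_t fuse = 0x08000000;   /* hypothetical fuse: field value 1 */
        unsigned cfg = decode_harvest(fuse);

        printf("harvest config 0x%x (VCE0 %s, VCE1 %s)\n", cfg,
               cfg & AMDGPU_VCE_HARVEST_VCE0 ? "harvested" : "present",
               cfg & AMDGPU_VCE_HARVEST_VCE1 ? "harvested" : "present");
        return 0;
    }
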
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index fa5a4448531d..68552da40287 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -122,6 +122,32 @@ static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
122 spin_unlock_irqrestore(&adev->smc_idx_lock, flags); 122 spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
123} 123}
124 124
125/* smu_8_0_d.h */
126#define mmMP0PUB_IND_INDEX 0x180
127#define mmMP0PUB_IND_DATA 0x181
128
129static u32 cz_smc_rreg(struct amdgpu_device *adev, u32 reg)
130{
131 unsigned long flags;
132 u32 r;
133
134 spin_lock_irqsave(&adev->smc_idx_lock, flags);
135 WREG32(mmMP0PUB_IND_INDEX, (reg));
136 r = RREG32(mmMP0PUB_IND_DATA);
137 spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
138 return r;
139}
140
141static void cz_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
142{
143 unsigned long flags;
144
145 spin_lock_irqsave(&adev->smc_idx_lock, flags);
146 WREG32(mmMP0PUB_IND_INDEX, (reg));
147 WREG32(mmMP0PUB_IND_DATA, (v));
148 spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
149}
150
125static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg) 151static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
126{ 152{
127 unsigned long flags; 153 unsigned long flags;
@@ -1222,8 +1248,13 @@ static int vi_common_early_init(void *handle)
1222 bool smc_enabled = false; 1248 bool smc_enabled = false;
1223 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1249 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1224 1250
1225 adev->smc_rreg = &vi_smc_rreg; 1251 if (adev->flags & AMDGPU_IS_APU) {
1226 adev->smc_wreg = &vi_smc_wreg; 1252 adev->smc_rreg = &cz_smc_rreg;
1253 adev->smc_wreg = &cz_smc_wreg;
1254 } else {
1255 adev->smc_rreg = &vi_smc_rreg;
1256 adev->smc_wreg = &vi_smc_wreg;
1257 }
1227 adev->pcie_rreg = &vi_pcie_rreg; 1258 adev->pcie_rreg = &vi_pcie_rreg;
1228 adev->pcie_wreg = &vi_pcie_wreg; 1259 adev->pcie_wreg = &vi_pcie_wreg;
1229 adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg; 1260 adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg;
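
vi_common_early_init() now installs cz_smc_rreg()/cz_smc_wreg() on APUs, whose SMC registers are reached through the MP0PUB_IND_INDEX/MP0PUB_IND_DATA indirect pair instead of the discrete-GPU path used by vi_smc_rreg(). The userspace model below fakes that index/data access; the real functions additionally hold smc_idx_lock around each pair so another CPU cannot change the index between the two accesses.

    /*
     * Minimal model of indirect (index/data pair) register access as added by
     * cz_smc_rreg()/cz_smc_wreg().  MMIO is faked with an array; the modulo
     * only keeps the toy array in bounds.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define SMC_SPACE_WORDS 256

    static uint32_t smc_space[SMC_SPACE_WORDS];  /* fake SMC register file */
    static uint32_t ind_index;                   /* mmMP0PUB_IND_INDEX stand-in */

    static void wreg_index(uint32_t reg)
    {
        ind_index = reg % SMC_SPACE_WORDS;
    }

    static uint32_t smc_rreg(uint32_t reg)
    {
        wreg_index(reg);                 /* WREG32(mmMP0PUB_IND_INDEX, reg) */
        return smc_space[ind_index];     /* RREG32(mmMP0PUB_IND_DATA)       */
    }

    static void smc_wreg(uint32_t reg, uint32_t v)
    {
        wreg_index(reg);                 /* WREG32(mmMP0PUB_IND_INDEX, reg) */
        smc_space[ind_index] = v;        /* WREG32(mmMP0PUB_IND_DATA, v)    */
    }

    int main(void)
    {
        smc_wreg(0x42, 0xdeadbeef);
        printf("reg 0x42 = 0x%08x\n", smc_rreg(0x42));
        return 0;
    }
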
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index 42d2ffa08716..01ffe9bffe38 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -531,8 +531,6 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
531 531
532 drm_crtc_vblank_off(crtc); 532 drm_crtc_vblank_off(crtc);
533 533
534 crtc->mode = *adj;
535
536 val = dcrtc->dumb_ctrl & ~CFG_DUMB_ENA; 534 val = dcrtc->dumb_ctrl & ~CFG_DUMB_ENA;
537 if (val != dcrtc->dumb_ctrl) { 535 if (val != dcrtc->dumb_ctrl) {
538 dcrtc->dumb_ctrl = val; 536 dcrtc->dumb_ctrl = val;
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index 580e10acaa3a..60a688ef81c7 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -69,8 +69,9 @@ void armada_gem_free_object(struct drm_gem_object *obj)
69 69
70 if (dobj->obj.import_attach) { 70 if (dobj->obj.import_attach) {
71 /* We only ever display imported data */ 71 /* We only ever display imported data */
72 dma_buf_unmap_attachment(dobj->obj.import_attach, dobj->sgt, 72 if (dobj->sgt)
73 DMA_TO_DEVICE); 73 dma_buf_unmap_attachment(dobj->obj.import_attach,
74 dobj->sgt, DMA_TO_DEVICE);
74 drm_prime_gem_destroy(&dobj->obj, NULL); 75 drm_prime_gem_destroy(&dobj->obj, NULL);
75 } 76 }
76 77
diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
index c5b06fdb459c..e939faba7fcc 100644
--- a/drivers/gpu/drm/armada/armada_overlay.c
+++ b/drivers/gpu/drm/armada/armada_overlay.c
@@ -7,6 +7,7 @@
7 * published by the Free Software Foundation. 7 * published by the Free Software Foundation.
8 */ 8 */
9#include <drm/drmP.h> 9#include <drm/drmP.h>
10#include <drm/drm_plane_helper.h>
10#include "armada_crtc.h" 11#include "armada_crtc.h"
11#include "armada_drm.h" 12#include "armada_drm.h"
12#include "armada_fb.h" 13#include "armada_fb.h"
@@ -85,16 +86,8 @@ static void armada_plane_vbl(struct armada_crtc *dcrtc, void *data)
85 86
86 if (fb) 87 if (fb)
87 armada_drm_queue_unref_work(dcrtc->crtc.dev, fb); 88 armada_drm_queue_unref_work(dcrtc->crtc.dev, fb);
88}
89 89
90static unsigned armada_limit(int start, unsigned size, unsigned max) 90 wake_up(&dplane->vbl.wait);
91{
92 int end = start + size;
93 if (end < 0)
94 return 0;
95 if (start < 0)
96 start = 0;
97 return (unsigned)end > max ? max - start : end - start;
98} 91}
99 92
100static int 93static int
@@ -105,26 +98,39 @@ armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
105{ 98{
106 struct armada_plane *dplane = drm_to_armada_plane(plane); 99 struct armada_plane *dplane = drm_to_armada_plane(plane);
107 struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc); 100 struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
101 struct drm_rect src = {
102 .x1 = src_x,
103 .y1 = src_y,
104 .x2 = src_x + src_w,
105 .y2 = src_y + src_h,
106 };
107 struct drm_rect dest = {
108 .x1 = crtc_x,
109 .y1 = crtc_y,
110 .x2 = crtc_x + crtc_w,
111 .y2 = crtc_y + crtc_h,
112 };
113 const struct drm_rect clip = {
114 .x2 = crtc->mode.hdisplay,
115 .y2 = crtc->mode.vdisplay,
116 };
108 uint32_t val, ctrl0; 117 uint32_t val, ctrl0;
109 unsigned idx = 0; 118 unsigned idx = 0;
119 bool visible;
110 int ret; 120 int ret;
111 121
112 crtc_w = armada_limit(crtc_x, crtc_w, dcrtc->crtc.mode.hdisplay); 122 ret = drm_plane_helper_check_update(plane, crtc, fb, &src, &dest, &clip,
113 crtc_h = armada_limit(crtc_y, crtc_h, dcrtc->crtc.mode.vdisplay); 123 0, INT_MAX, true, false, &visible);
124 if (ret)
125 return ret;
126
114 ctrl0 = CFG_DMA_FMT(drm_fb_to_armada_fb(fb)->fmt) | 127 ctrl0 = CFG_DMA_FMT(drm_fb_to_armada_fb(fb)->fmt) |
115 CFG_DMA_MOD(drm_fb_to_armada_fb(fb)->mod) | 128 CFG_DMA_MOD(drm_fb_to_armada_fb(fb)->mod) |
116 CFG_CBSH_ENA | CFG_DMA_HSMOOTH | CFG_DMA_ENA; 129 CFG_CBSH_ENA | CFG_DMA_HSMOOTH | CFG_DMA_ENA;
117 130
118 /* Does the position/size result in nothing to display? */ 131 /* Does the position/size result in nothing to display? */
119 if (crtc_w == 0 || crtc_h == 0) { 132 if (!visible)
120 ctrl0 &= ~CFG_DMA_ENA; 133 ctrl0 &= ~CFG_DMA_ENA;
121 }
122
123 /*
124 * FIXME: if the starting point is off screen, we need to
125 * adjust src_x, src_y, src_w, src_h appropriately, and
126 * according to the scale.
127 */
128 134
129 if (!dcrtc->plane) { 135 if (!dcrtc->plane) {
130 dcrtc->plane = plane; 136 dcrtc->plane = plane;
@@ -134,15 +140,19 @@ armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
134 /* FIXME: overlay on an interlaced display */ 140 /* FIXME: overlay on an interlaced display */
135 /* Just updating the position/size? */ 141 /* Just updating the position/size? */
136 if (plane->fb == fb && dplane->ctrl0 == ctrl0) { 142 if (plane->fb == fb && dplane->ctrl0 == ctrl0) {
137 val = (src_h & 0xffff0000) | src_w >> 16; 143 val = (drm_rect_height(&src) & 0xffff0000) |
144 drm_rect_width(&src) >> 16;
138 dplane->src_hw = val; 145 dplane->src_hw = val;
139 writel_relaxed(val, dcrtc->base + LCD_SPU_DMA_HPXL_VLN); 146 writel_relaxed(val, dcrtc->base + LCD_SPU_DMA_HPXL_VLN);
140 val = crtc_h << 16 | crtc_w; 147
148 val = drm_rect_height(&dest) << 16 | drm_rect_width(&dest);
141 dplane->dst_hw = val; 149 dplane->dst_hw = val;
142 writel_relaxed(val, dcrtc->base + LCD_SPU_DZM_HPXL_VLN); 150 writel_relaxed(val, dcrtc->base + LCD_SPU_DZM_HPXL_VLN);
143 val = crtc_y << 16 | crtc_x; 151
152 val = dest.y1 << 16 | dest.x1;
144 dplane->dst_yx = val; 153 dplane->dst_yx = val;
145 writel_relaxed(val, dcrtc->base + LCD_SPU_DMA_OVSA_HPXL_VLN); 154 writel_relaxed(val, dcrtc->base + LCD_SPU_DMA_OVSA_HPXL_VLN);
155
146 return 0; 156 return 0;
147 } else if (~dplane->ctrl0 & ctrl0 & CFG_DMA_ENA) { 157 } else if (~dplane->ctrl0 & ctrl0 & CFG_DMA_ENA) {
148 /* Power up the Y/U/V FIFOs on ENA 0->1 transitions */ 158 /* Power up the Y/U/V FIFOs on ENA 0->1 transitions */
@@ -150,15 +160,14 @@ armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
150 dcrtc->base + LCD_SPU_SRAM_PARA1); 160 dcrtc->base + LCD_SPU_SRAM_PARA1);
151 } 161 }
152 162
153 ret = wait_event_timeout(dplane->vbl.wait, 163 wait_event_timeout(dplane->vbl.wait,
154 list_empty(&dplane->vbl.update.node), 164 list_empty(&dplane->vbl.update.node),
155 HZ/25); 165 HZ/25);
156 if (ret < 0)
157 return ret;
158 166
159 if (plane->fb != fb) { 167 if (plane->fb != fb) {
160 struct armada_gem_object *obj = drm_fb_obj(fb); 168 struct armada_gem_object *obj = drm_fb_obj(fb);
161 uint32_t sy, su, sv; 169 uint32_t addr[3], pixel_format;
170 int i, num_planes, hsub;
162 171
163 /* 172 /*
164 * Take a reference on the new framebuffer - we want to 173 * Take a reference on the new framebuffer - we want to
@@ -178,26 +187,39 @@ armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
178 older_fb); 187 older_fb);
179 } 188 }
180 189
181 src_y >>= 16; 190 src_y = src.y1 >> 16;
182 src_x >>= 16; 191 src_x = src.x1 >> 16;
183 sy = obj->dev_addr + fb->offsets[0] + src_y * fb->pitches[0] +
184 src_x * fb->bits_per_pixel / 8;
185 su = obj->dev_addr + fb->offsets[1] + src_y * fb->pitches[1] +
186 src_x;
187 sv = obj->dev_addr + fb->offsets[2] + src_y * fb->pitches[2] +
188 src_x;
189 192
190 armada_reg_queue_set(dplane->vbl.regs, idx, sy, 193 pixel_format = fb->pixel_format;
194 hsub = drm_format_horz_chroma_subsampling(pixel_format);
195 num_planes = drm_format_num_planes(pixel_format);
196
197 /*
198 * Annoyingly, shifting a YUYV-format image by one pixel
199 * causes the U/V planes to toggle. Toggle the UV swap.
200 * (Unfortunately, this causes momentary colour flickering.)
201 */
202 if (src_x & (hsub - 1) && num_planes == 1)
203 ctrl0 ^= CFG_DMA_MOD(CFG_SWAPUV);
204
205 for (i = 0; i < num_planes; i++)
206 addr[i] = obj->dev_addr + fb->offsets[i] +
207 src_y * fb->pitches[i] +
208 src_x * drm_format_plane_cpp(pixel_format, i);
209 for (; i < ARRAY_SIZE(addr); i++)
210 addr[i] = 0;
211
212 armada_reg_queue_set(dplane->vbl.regs, idx, addr[0],
191 LCD_SPU_DMA_START_ADDR_Y0); 213 LCD_SPU_DMA_START_ADDR_Y0);
192 armada_reg_queue_set(dplane->vbl.regs, idx, su, 214 armada_reg_queue_set(dplane->vbl.regs, idx, addr[1],
193 LCD_SPU_DMA_START_ADDR_U0); 215 LCD_SPU_DMA_START_ADDR_U0);
194 armada_reg_queue_set(dplane->vbl.regs, idx, sv, 216 armada_reg_queue_set(dplane->vbl.regs, idx, addr[2],
195 LCD_SPU_DMA_START_ADDR_V0); 217 LCD_SPU_DMA_START_ADDR_V0);
196 armada_reg_queue_set(dplane->vbl.regs, idx, sy, 218 armada_reg_queue_set(dplane->vbl.regs, idx, addr[0],
197 LCD_SPU_DMA_START_ADDR_Y1); 219 LCD_SPU_DMA_START_ADDR_Y1);
198 armada_reg_queue_set(dplane->vbl.regs, idx, su, 220 armada_reg_queue_set(dplane->vbl.regs, idx, addr[1],
199 LCD_SPU_DMA_START_ADDR_U1); 221 LCD_SPU_DMA_START_ADDR_U1);
200 armada_reg_queue_set(dplane->vbl.regs, idx, sv, 222 armada_reg_queue_set(dplane->vbl.regs, idx, addr[2],
201 LCD_SPU_DMA_START_ADDR_V1); 223 LCD_SPU_DMA_START_ADDR_V1);
202 224
203 val = fb->pitches[0] << 16 | fb->pitches[0]; 225 val = fb->pitches[0] << 16 | fb->pitches[0];
@@ -208,24 +230,27 @@ armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
208 LCD_SPU_DMA_PITCH_UV); 230 LCD_SPU_DMA_PITCH_UV);
209 } 231 }
210 232
211 val = (src_h & 0xffff0000) | src_w >> 16; 233 val = (drm_rect_height(&src) & 0xffff0000) | drm_rect_width(&src) >> 16;
212 if (dplane->src_hw != val) { 234 if (dplane->src_hw != val) {
213 dplane->src_hw = val; 235 dplane->src_hw = val;
214 armada_reg_queue_set(dplane->vbl.regs, idx, val, 236 armada_reg_queue_set(dplane->vbl.regs, idx, val,
215 LCD_SPU_DMA_HPXL_VLN); 237 LCD_SPU_DMA_HPXL_VLN);
216 } 238 }
217 val = crtc_h << 16 | crtc_w; 239
240 val = drm_rect_height(&dest) << 16 | drm_rect_width(&dest);
218 if (dplane->dst_hw != val) { 241 if (dplane->dst_hw != val) {
219 dplane->dst_hw = val; 242 dplane->dst_hw = val;
220 armada_reg_queue_set(dplane->vbl.regs, idx, val, 243 armada_reg_queue_set(dplane->vbl.regs, idx, val,
221 LCD_SPU_DZM_HPXL_VLN); 244 LCD_SPU_DZM_HPXL_VLN);
222 } 245 }
223 val = crtc_y << 16 | crtc_x; 246
247 val = dest.y1 << 16 | dest.x1;
224 if (dplane->dst_yx != val) { 248 if (dplane->dst_yx != val) {
225 dplane->dst_yx = val; 249 dplane->dst_yx = val;
226 armada_reg_queue_set(dplane->vbl.regs, idx, val, 250 armada_reg_queue_set(dplane->vbl.regs, idx, val,
227 LCD_SPU_DMA_OVSA_HPXL_VLN); 251 LCD_SPU_DMA_OVSA_HPXL_VLN);
228 } 252 }
253
229 if (dplane->ctrl0 != ctrl0) { 254 if (dplane->ctrl0 != ctrl0) {
230 dplane->ctrl0 = ctrl0; 255 dplane->ctrl0 = ctrl0;
231 armada_reg_queue_mod(dplane->vbl.regs, idx, ctrl0, 256 armada_reg_queue_mod(dplane->vbl.regs, idx, ctrl0,
@@ -279,7 +304,11 @@ static int armada_plane_disable(struct drm_plane *plane)
279 304
280static void armada_plane_destroy(struct drm_plane *plane) 305static void armada_plane_destroy(struct drm_plane *plane)
281{ 306{
282 kfree(plane); 307 struct armada_plane *dplane = drm_to_armada_plane(plane);
308
309 drm_plane_cleanup(plane);
310
311 kfree(dplane);
283} 312}
284 313
285static int armada_plane_set_property(struct drm_plane *plane, 314static int armada_plane_set_property(struct drm_plane *plane,
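
The armada overlay update replaces the home-grown armada_limit() clipping with drm_plane_helper_check_update() operating on drm_rect source/destination/clip rectangles (source coordinates are 16.16 fixed point and are shifted down before use), and it computes one scanout address per plane from the framebuffer offsets, pitches and per-plane bytes-per-pixel instead of the hard-coded Y/U/V arithmetic. The sketch below mirrors only that per-plane address calculation; the format description is a made-up two-plane layout standing in for drm_format_num_planes()/drm_format_plane_cpp().

    /*
     * Per-plane scanout address computation as done in the hunk above.  The
     * fake framebuffer layout is invented; the real driver queries the format
     * helpers and also toggles UV swap based on chroma subsampling.
     */
    #include <stdint.h>
    #include <stdio.h>

    struct fake_fb {
        uint64_t dev_addr;       /* base address of the GEM object */
        uint32_t offsets[3];
        uint32_t pitches[3];
        int num_planes;
        int cpp[3];              /* bytes per pixel, per plane */
    };

    static void plane_addrs(const struct fake_fb *fb, int src_x, int src_y,
                            uint64_t addr[3])
    {
        int i;

        for (i = 0; i < fb->num_planes; i++)
            addr[i] = fb->dev_addr + fb->offsets[i] +
                      (uint64_t)src_y * fb->pitches[i] +
                      (uint64_t)src_x * fb->cpp[i];
        for (; i < 3; i++)
            addr[i] = 0;          /* unused planes are programmed as zero */
    }

    int main(void)
    {
        /* made-up two-plane layout: full-size luma + interleaved chroma */
        struct fake_fb fb = {
            .dev_addr   = 0x10000000,
            .offsets    = { 0, 1280 * 720, 0 },
            .pitches    = { 1280, 1280, 0 },
            .num_planes = 2,
            .cpp        = { 1, 2, 0 },
        };
        uint64_t addr[3];

        plane_addrs(&fb, 16, 8, addr);
        printf("plane0 0x%llx plane1 0x%llx\n",
               (unsigned long long)addr[0], (unsigned long long)addr[1]);
        return 0;
    }
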
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index f69b92535505..5ae5c6923128 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -355,6 +355,7 @@ int atmel_hlcdc_crtc_create(struct drm_device *dev)
355 planes->overlays[i]->base.possible_crtcs = 1 << crtc->id; 355 planes->overlays[i]->base.possible_crtcs = 1 << crtc->id;
356 356
357 drm_crtc_helper_add(&crtc->base, &lcdc_crtc_helper_funcs); 357 drm_crtc_helper_add(&crtc->base, &lcdc_crtc_helper_funcs);
358 drm_crtc_vblank_reset(&crtc->base);
358 359
359 dc->crtc = &crtc->base; 360 dc->crtc = &crtc->base;
360 361
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index 60b0c13d7ff5..ef6182bc8e5e 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -313,20 +313,20 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev)
313 313
314 pm_runtime_enable(dev->dev); 314 pm_runtime_enable(dev->dev);
315 315
316 ret = atmel_hlcdc_dc_modeset_init(dev); 316 ret = drm_vblank_init(dev, 1);
317 if (ret < 0) { 317 if (ret < 0) {
318 dev_err(dev->dev, "failed to initialize mode setting\n"); 318 dev_err(dev->dev, "failed to initialize vblank\n");
319 goto err_periph_clk_disable; 319 goto err_periph_clk_disable;
320 } 320 }
321 321
322 drm_mode_config_reset(dev); 322 ret = atmel_hlcdc_dc_modeset_init(dev);
323
324 ret = drm_vblank_init(dev, 1);
325 if (ret < 0) { 323 if (ret < 0) {
326 dev_err(dev->dev, "failed to initialize vblank\n"); 324 dev_err(dev->dev, "failed to initialize mode setting\n");
327 goto err_periph_clk_disable; 325 goto err_periph_clk_disable;
328 } 326 }
329 327
328 drm_mode_config_reset(dev);
329
330 pm_runtime_get_sync(dev->dev); 330 pm_runtime_get_sync(dev->dev);
331 ret = drm_irq_install(dev, dc->hlcdc->irq); 331 ret = drm_irq_install(dev, dc->hlcdc->irq);
332 pm_runtime_put_sync(dev->dev); 332 pm_runtime_put_sync(dev->dev);
@@ -559,7 +559,7 @@ static int atmel_hlcdc_dc_drm_remove(struct platform_device *pdev)
559 return 0; 559 return 0;
560} 560}
561 561
562#ifdef CONFIG_PM 562#ifdef CONFIG_PM_SLEEP
563static int atmel_hlcdc_dc_drm_suspend(struct device *dev) 563static int atmel_hlcdc_dc_drm_suspend(struct device *dev)
564{ 564{
565 struct drm_device *drm_dev = dev_get_drvdata(dev); 565 struct drm_device *drm_dev = dev_get_drvdata(dev);
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 5b59d5ad7d1c..9dcc7280e572 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -196,7 +196,12 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
196 } 196 }
197 197
198 funcs = connector->helper_private; 198 funcs = connector->helper_private;
199 new_encoder = funcs->best_encoder(connector); 199
200 if (funcs->atomic_best_encoder)
201 new_encoder = funcs->atomic_best_encoder(connector,
202 connector_state);
203 else
204 new_encoder = funcs->best_encoder(connector);
200 205
201 if (!new_encoder) { 206 if (!new_encoder) {
202 DRM_DEBUG_ATOMIC("No suitable encoder found for [CONNECTOR:%d:%s]\n", 207 DRM_DEBUG_ATOMIC("No suitable encoder found for [CONNECTOR:%d:%s]\n",
@@ -229,6 +234,9 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
229 } 234 }
230 } 235 }
231 236
237 if (WARN_ON(!connector_state->crtc))
238 return -EINVAL;
239
232 connector_state->best_encoder = new_encoder; 240 connector_state->best_encoder = new_encoder;
233 idx = drm_crtc_index(connector_state->crtc); 241 idx = drm_crtc_index(connector_state->crtc);
234 242
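
The atomic helper now prefers an optional, state-aware connector hook (atomic_best_encoder) and only falls back to the legacy best_encoder when it is absent, and it refuses to continue if the connector state somehow has no CRTC. The self-contained snippet below shows the same optional-hook-with-fallback shape in isolation; all names in it are invented.

    /* Generic "optional hook with fallback" pattern; names are invented. */
    #include <stdio.h>

    struct thing;
    struct state { int id; };

    struct helper_funcs {
        /* either hook may be NULL; the state-aware one wins when present */
        int (*pick_with_state)(const struct thing *t, const struct state *s);
        int (*pick)(const struct thing *t);
    };

    struct thing { const struct helper_funcs *funcs; };

    static int legacy_pick(const struct thing *t)
    {
        (void)t;
        return 1;
    }

    static int stateful_pick(const struct thing *t, const struct state *s)
    {
        (void)t;
        return 100 + s->id;
    }

    static int pick_encoder(const struct thing *t, const struct state *s)
    {
        if (t->funcs->pick_with_state)
            return t->funcs->pick_with_state(t, s);
        return t->funcs->pick(t);
    }

    int main(void)
    {
        const struct helper_funcs old_only  = { .pick = legacy_pick };
        const struct helper_funcs new_aware = { .pick_with_state = stateful_pick,
                                                .pick = legacy_pick };
        struct thing a = { &old_only }, b = { &new_aware };
        struct state s = { .id = 7 };

        printf("%d %d\n", pick_encoder(&a, &s), pick_encoder(&b, &s));
        return 0;
    }
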
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index b9ba06176eb1..fed748311b92 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -2706,8 +2706,11 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
2706 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 2706 if (!drm_core_check_feature(dev, DRIVER_MODESET))
2707 return -EINVAL; 2707 return -EINVAL;
2708 2708
2709 /* For some reason crtc x/y offsets are signed internally. */ 2709 /*
2710 if (crtc_req->x > INT_MAX || crtc_req->y > INT_MAX) 2710 * Universal plane src offsets are only 16.16, prevent havoc for
2711 * drivers using universal plane code internally.
2712 */
2713 if (crtc_req->x & 0xffff0000 || crtc_req->y & 0xffff0000)
2711 return -ERANGE; 2714 return -ERANGE;
2712 2715
2713 drm_modeset_lock_all(dev); 2716 drm_modeset_lock_all(dev);
@@ -5395,12 +5398,9 @@ void drm_mode_config_reset(struct drm_device *dev)
5395 if (encoder->funcs->reset) 5398 if (encoder->funcs->reset)
5396 encoder->funcs->reset(encoder); 5399 encoder->funcs->reset(encoder);
5397 5400
5398 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 5401 list_for_each_entry(connector, &dev->mode_config.connector_list, head)
5399 connector->status = connector_status_unknown;
5400
5401 if (connector->funcs->reset) 5402 if (connector->funcs->reset)
5402 connector->funcs->reset(connector); 5403 connector->funcs->reset(connector);
5403 }
5404} 5404}
5405EXPORT_SYMBOL(drm_mode_config_reset); 5405EXPORT_SYMBOL(drm_mode_config_reset);
5406 5406
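
drm_mode_setcrtc() previously only rejected x/y offsets above INT_MAX; since universal-plane source coordinates are 16.16 fixed point, the integer part must fit in 16 bits, so any offset with bits 31:16 set is now refused with -ERANGE. The same commit range also stops drm_mode_config_reset() from forcing every connector's status to unknown before calling its reset hook. The offset check is easy to demonstrate:

    /*
     * 16.16 source-coordinate range check: only offsets whose integer part
     * fits in 16 bits pass; testing "x & 0xffff0000" rejects anything >= 65536
     * (and any value with the high bits set).
     */
    #include <stdint.h>
    #include <stdio.h>

    static int crtc_offset_ok(uint32_t x, uint32_t y)
    {
        if ((x & 0xffff0000u) || (y & 0xffff0000u))
            return 0;             /* the kernel returns -ERANGE here */
        return 1;
    }

    int main(void)
    {
        printf("(0,0)       -> %d\n", crtc_offset_ok(0, 0));
        printf("(65535,100) -> %d\n", crtc_offset_ok(65535, 100));
        printf("(65536,0)   -> %d\n", crtc_offset_ok(65536, 0));
        printf("((u32)-1,0) -> %d\n", crtc_offset_ok((uint32_t)-1, 0));
        return 0;
    }
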
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 778bbb6425b8..eb603f1defc2 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -873,9 +873,10 @@ static void drm_dp_destroy_port(struct kref *kref)
873 from an EDID retrieval */ 873 from an EDID retrieval */
874 if (port->connector) { 874 if (port->connector) {
875 mutex_lock(&mgr->destroy_connector_lock); 875 mutex_lock(&mgr->destroy_connector_lock);
876 list_add(&port->connector->destroy_list, &mgr->destroy_connector_list); 876 list_add(&port->next, &mgr->destroy_connector_list);
877 mutex_unlock(&mgr->destroy_connector_lock); 877 mutex_unlock(&mgr->destroy_connector_lock);
878 schedule_work(&mgr->destroy_connector_work); 878 schedule_work(&mgr->destroy_connector_work);
879 return;
879 } 880 }
880 drm_dp_port_teardown_pdt(port, port->pdt); 881 drm_dp_port_teardown_pdt(port, port->pdt);
881 882
@@ -1294,7 +1295,6 @@ retry:
1294 goto retry; 1295 goto retry;
1295 } 1296 }
1296 DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret); 1297 DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);
1297 WARN(1, "fail\n");
1298 1298
1299 return -EIO; 1299 return -EIO;
1300 } 1300 }
@@ -2660,7 +2660,7 @@ static void drm_dp_tx_work(struct work_struct *work)
2660static void drm_dp_destroy_connector_work(struct work_struct *work) 2660static void drm_dp_destroy_connector_work(struct work_struct *work)
2661{ 2661{
2662 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work); 2662 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
2663 struct drm_connector *connector; 2663 struct drm_dp_mst_port *port;
2664 2664
2665 /* 2665 /*
2666 * Not a regular list traverse as we have to drop the destroy 2666 * Not a regular list traverse as we have to drop the destroy
@@ -2669,15 +2669,21 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
2669 */ 2669 */
2670 for (;;) { 2670 for (;;) {
2671 mutex_lock(&mgr->destroy_connector_lock); 2671 mutex_lock(&mgr->destroy_connector_lock);
2672 connector = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_connector, destroy_list); 2672 port = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_dp_mst_port, next);
2673 if (!connector) { 2673 if (!port) {
2674 mutex_unlock(&mgr->destroy_connector_lock); 2674 mutex_unlock(&mgr->destroy_connector_lock);
2675 break; 2675 break;
2676 } 2676 }
2677 list_del(&connector->destroy_list); 2677 list_del(&port->next);
2678 mutex_unlock(&mgr->destroy_connector_lock); 2678 mutex_unlock(&mgr->destroy_connector_lock);
2679 2679
2680 mgr->cbs->destroy_connector(mgr, connector); 2680 mgr->cbs->destroy_connector(mgr, port->connector);
2681
2682 drm_dp_port_teardown_pdt(port, port->pdt);
2683
2684 if (!port->input && port->vcpi.vcpi > 0)
2685 drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
2686 kfree(port);
2681 } 2687 }
2682} 2688}
2683 2689
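
The DP-MST teardown above stops borrowing a drm_connector list head: the port now queues itself (via its own next node) on the manager's destroy list, and the worker pops one port at a time under destroy_connector_lock, dropping the lock before destroying the connector, tearing down the PDT and freeing the port. A plain userspace rendition of that queue-under-lock / drain-outside-lock shape (the kernel version uses struct list_head and a workqueue):

    /* Deferred-teardown sketch: objects carry their own list node. */
    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct port {
        int id;
        struct port *next;        /* the port carries its own list node */
    };

    static pthread_mutex_t destroy_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct port *destroy_list;

    static void queue_destroy(struct port *p)
    {
        pthread_mutex_lock(&destroy_lock);
        p->next = destroy_list;
        destroy_list = p;
        pthread_mutex_unlock(&destroy_lock);
        /* kernel: schedule_work(&mgr->destroy_connector_work) */
    }

    static void destroy_work(void)
    {
        for (;;) {
            struct port *p;

            pthread_mutex_lock(&destroy_lock);
            p = destroy_list;
            if (!p) {
                pthread_mutex_unlock(&destroy_lock);
                break;
            }
            destroy_list = p->next;
            pthread_mutex_unlock(&destroy_lock);

            /* heavy teardown happens without the list lock held */
            printf("destroying port %d\n", p->id);
            free(p);
        }
    }

    int main(void)
    {
        int i;

        for (i = 0; i < 3; i++) {
            struct port *p = malloc(sizeof(*p));

            if (!p)
                break;
            p->id = i;
            queue_destroy(p);
        }
        destroy_work();
        return 0;
    }
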
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index aa8bbb460c57..9cfcd0aef0df 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -70,6 +70,8 @@
70 70
71#define DRM_IOCTL_WAIT_VBLANK32 DRM_IOWR(0x3a, drm_wait_vblank32_t) 71#define DRM_IOCTL_WAIT_VBLANK32 DRM_IOWR(0x3a, drm_wait_vblank32_t)
72 72
73#define DRM_IOCTL_MODE_ADDFB232 DRM_IOWR(0xb8, drm_mode_fb_cmd232_t)
74
73typedef struct drm_version_32 { 75typedef struct drm_version_32 {
74 int version_major; /**< Major version */ 76 int version_major; /**< Major version */
75 int version_minor; /**< Minor version */ 77 int version_minor; /**< Minor version */
@@ -1016,6 +1018,63 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
1016 return 0; 1018 return 0;
1017} 1019}
1018 1020
1021typedef struct drm_mode_fb_cmd232 {
1022 u32 fb_id;
1023 u32 width;
1024 u32 height;
1025 u32 pixel_format;
1026 u32 flags;
1027 u32 handles[4];
1028 u32 pitches[4];
1029 u32 offsets[4];
1030 u64 modifier[4];
1031} __attribute__((packed)) drm_mode_fb_cmd232_t;
1032
1033static int compat_drm_mode_addfb2(struct file *file, unsigned int cmd,
1034 unsigned long arg)
1035{
1036 struct drm_mode_fb_cmd232 __user *argp = (void __user *)arg;
1037 struct drm_mode_fb_cmd232 req32;
1038 struct drm_mode_fb_cmd2 __user *req64;
1039 int i;
1040 int err;
1041
1042 if (copy_from_user(&req32, argp, sizeof(req32)))
1043 return -EFAULT;
1044
1045 req64 = compat_alloc_user_space(sizeof(*req64));
1046
1047 if (!access_ok(VERIFY_WRITE, req64, sizeof(*req64))
1048 || __put_user(req32.width, &req64->width)
1049 || __put_user(req32.height, &req64->height)
1050 || __put_user(req32.pixel_format, &req64->pixel_format)
1051 || __put_user(req32.flags, &req64->flags))
1052 return -EFAULT;
1053
1054 for (i = 0; i < 4; i++) {
1055 if (__put_user(req32.handles[i], &req64->handles[i]))
1056 return -EFAULT;
1057 if (__put_user(req32.pitches[i], &req64->pitches[i]))
1058 return -EFAULT;
1059 if (__put_user(req32.offsets[i], &req64->offsets[i]))
1060 return -EFAULT;
1061 if (__put_user(req32.modifier[i], &req64->modifier[i]))
1062 return -EFAULT;
1063 }
1064
1065 err = drm_ioctl(file, DRM_IOCTL_MODE_ADDFB2, (unsigned long)req64);
1066 if (err)
1067 return err;
1068
1069 if (__get_user(req32.fb_id, &req64->fb_id))
1070 return -EFAULT;
1071
1072 if (copy_to_user(argp, &req32, sizeof(req32)))
1073 return -EFAULT;
1074
1075 return 0;
1076}
1077
1019static drm_ioctl_compat_t *drm_compat_ioctls[] = { 1078static drm_ioctl_compat_t *drm_compat_ioctls[] = {
1020 [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version, 1079 [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
1021 [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique, 1080 [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
@@ -1048,6 +1107,7 @@ static drm_ioctl_compat_t *drm_compat_ioctls[] = {
1048 [DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW32)] = compat_drm_update_draw, 1107 [DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW32)] = compat_drm_update_draw,
1049#endif 1108#endif
1050 [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK32)] = compat_drm_wait_vblank, 1109 [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK32)] = compat_drm_wait_vblank,
1110 [DRM_IOCTL_NR(DRM_IOCTL_MODE_ADDFB232)] = compat_drm_mode_addfb2,
1051}; 1111};
1052 1112
1053/** 1113/**
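
DRM_IOCTL_MODE_ADDFB2 gains a 32-bit compat handler because drm_mode_fb_cmd2 ends in a u64 modifier[4] array after seventeen u32 fields: with only 4-byte alignment for 64-bit members on i386, the 32-bit layout places the array at offset 68 while the 64-bit ABI pads it to 72, so the struct sizes (and hence the ioctl numbers) differ. The handler copies the packed 32-bit struct in, rebuilds a native drm_mode_fb_cmd2 in compat_alloc_user_space() memory, issues the real ioctl and copies fb_id back. The demo below just makes the layout difference visible on an LP64 host:

    /*
     * Packed vs. naturally aligned layout of an ADDFB2-style struct: seventeen
     * u32 fields followed by u64 modifier[4].  On an LP64 host the two sizes
     * and modifier offsets come out different, which is why the compat
     * wrapper exists.
     */
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct fb_cmd2_32 {          /* mirrors drm_mode_fb_cmd232 in the patch */
        uint32_t fb_id, width, height, pixel_format, flags;
        uint32_t handles[4], pitches[4], offsets[4];
        uint64_t modifier[4];
    } __attribute__((packed));

    struct fb_cmd2_64 {          /* same members, natural alignment */
        uint32_t fb_id, width, height, pixel_format, flags;
        uint32_t handles[4], pitches[4], offsets[4];
        uint64_t modifier[4];
    };

    int main(void)
    {
        printf("32-bit view: size %zu, modifier offset %zu\n",
               sizeof(struct fb_cmd2_32), offsetof(struct fb_cmd2_32, modifier));
        printf("64-bit view: size %zu, modifier offset %zu\n",
               sizeof(struct fb_cmd2_64), offsetof(struct fb_cmd2_64, modifier));
        return 0;
    }
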
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index f9cc68fbd2a3..b50fa0afd907 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -75,7 +75,7 @@ module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600)
75module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600); 75module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
76 76
77static void store_vblank(struct drm_device *dev, int crtc, 77static void store_vblank(struct drm_device *dev, int crtc,
78 unsigned vblank_count_inc, 78 u32 vblank_count_inc,
79 struct timeval *t_vblank) 79 struct timeval *t_vblank)
80{ 80{
81 struct drm_vblank_crtc *vblank = &dev->vblank[crtc]; 81 struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 842d6b8dc3c4..2a652359af64 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -1745,7 +1745,6 @@ static int fimc_probe(struct platform_device *pdev)
1745 spin_lock_init(&ctx->lock); 1745 spin_lock_init(&ctx->lock);
1746 platform_set_drvdata(pdev, ctx); 1746 platform_set_drvdata(pdev, ctx);
1747 1747
1748 pm_runtime_set_active(dev);
1749 pm_runtime_enable(dev); 1748 pm_runtime_enable(dev);
1750 1749
1751 ret = exynos_drm_ippdrv_register(ippdrv); 1750 ret = exynos_drm_ippdrv_register(ippdrv);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 8040ed2a831f..f1c6b76c127f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -593,8 +593,7 @@ static int gsc_src_set_transf(struct device *dev,
593 593
594 gsc_write(cfg, GSC_IN_CON); 594 gsc_write(cfg, GSC_IN_CON);
595 595
596 ctx->rotation = cfg & 596 ctx->rotation = (cfg & GSC_IN_ROT_90) ? 1 : 0;
597 (GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 1 : 0;
598 *swap = ctx->rotation; 597 *swap = ctx->rotation;
599 598
600 return 0; 599 return 0;
@@ -857,8 +856,7 @@ static int gsc_dst_set_transf(struct device *dev,
857 856
858 gsc_write(cfg, GSC_IN_CON); 857 gsc_write(cfg, GSC_IN_CON);
859 858
860 ctx->rotation = cfg & 859 ctx->rotation = (cfg & GSC_IN_ROT_90) ? 1 : 0;
861 (GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 1 : 0;
862 *swap = ctx->rotation; 860 *swap = ctx->rotation;
863 861
864 return 0; 862 return 0;
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 99e286489031..4a00990e4ae4 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -1064,6 +1064,7 @@ static int hdmi_get_modes(struct drm_connector *connector)
1064{ 1064{
1065 struct hdmi_context *hdata = ctx_from_connector(connector); 1065 struct hdmi_context *hdata = ctx_from_connector(connector);
1066 struct edid *edid; 1066 struct edid *edid;
1067 int ret;
1067 1068
1068 if (!hdata->ddc_adpt) 1069 if (!hdata->ddc_adpt)
1069 return -ENODEV; 1070 return -ENODEV;
@@ -1079,7 +1080,11 @@ static int hdmi_get_modes(struct drm_connector *connector)
1079 1080
1080 drm_mode_connector_update_edid_property(connector, edid); 1081 drm_mode_connector_update_edid_property(connector, edid);
1081 1082
1082 return drm_add_edid_modes(connector, edid); 1083 ret = drm_add_edid_modes(connector, edid);
1084
1085 kfree(edid);
1086
1087 return ret;
1083} 1088}
1084 1089
1085static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock) 1090static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock)
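
hdmi_get_modes() was leaking the EDID returned by drm_get_edid() on every probe: drm_add_edid_modes() only parses it, so the buffer must be freed afterwards. The fix stores the return value, frees the EDID, then returns. The same shape in plain C, with a malloc standing in for drm_get_edid():

    /* Leak-fix pattern: keep the result, free the buffer, then return. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct edid_blob { unsigned char data[128]; };

    static struct edid_blob *get_edid(void)
    {
        struct edid_blob *e = malloc(sizeof(*e));

        if (e)
            memset(e->data, 0x5a, sizeof(e->data));
        return e;
    }

    static int add_modes(const struct edid_blob *e)
    {
        return e->data[0] == 0x5a ? 3 : 0;   /* pretend 3 modes were added */
    }

    static int get_modes(void)
    {
        struct edid_blob *edid = get_edid();
        int ret;

        if (!edid)
            return -12;                      /* -ENOMEM */

        ret = add_modes(edid);
        free(edid);                          /* previously leaked every call */
        return ret;
    }

    int main(void)
    {
        printf("modes: %d\n", get_modes());
        return 0;
    }
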
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index cae98db33062..4706b56902b4 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -718,6 +718,10 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
718 718
719 /* handling VSYNC */ 719 /* handling VSYNC */
720 if (val & MXR_INT_STATUS_VSYNC) { 720 if (val & MXR_INT_STATUS_VSYNC) {
721 /* vsync interrupt use different bit for read and clear */
722 val |= MXR_INT_CLEAR_VSYNC;
723 val &= ~MXR_INT_STATUS_VSYNC;
724
721 /* interlace scan need to check shadow register */ 725 /* interlace scan need to check shadow register */
722 if (ctx->interlace) { 726 if (ctx->interlace) {
723 base = mixer_reg_read(res, MXR_GRAPHIC_BASE(0)); 727 base = mixer_reg_read(res, MXR_GRAPHIC_BASE(0));
@@ -743,11 +747,6 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)

 out:
 	/* clear interrupts */
-	if (~val & MXR_INT_EN_VSYNC) {
-		/* vsync interrupt use different bit for read and clear */
-		val &= ~MXR_INT_EN_VSYNC;
-		val |= MXR_INT_CLEAR_VSYNC;
-	}
 	mixer_reg_write(res, MXR_INT_STATUS, val);

 	spin_unlock(&res->reg_slock);
@@ -907,8 +906,8 @@ static int mixer_enable_vblank(struct exynos_drm_crtc *crtc)
 	}

 	/* enable vsync interrupt */
-	mixer_reg_writemask(res, MXR_INT_EN, MXR_INT_EN_VSYNC,
-			MXR_INT_EN_VSYNC);
+	mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
+	mixer_reg_writemask(res, MXR_INT_EN, ~0, MXR_INT_EN_VSYNC);

 	return 0;
 }
@@ -918,7 +917,13 @@ static void mixer_disable_vblank(struct exynos_drm_crtc *crtc)
 	struct mixer_context *mixer_ctx = crtc->ctx;
 	struct mixer_resources *res = &mixer_ctx->mixer_res;

+	if (!mixer_ctx->powered) {
+		mixer_ctx->int_en &= MXR_INT_EN_VSYNC;
+		return;
+	}
+
 	/* disable vsync interrupt */
+	mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
 	mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC);
 }

@@ -1047,6 +1052,8 @@ static void mixer_enable(struct exynos_drm_crtc *crtc)

 	mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET);

+	if (ctx->int_en & MXR_INT_EN_VSYNC)
+		mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
 	mixer_reg_write(res, MXR_INT_EN, ctx->int_en);
 	mixer_win_reset(ctx);
 }
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index fe1599d75f14..424228be79ae 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -606,8 +606,6 @@ static void
 tda998x_write_if(struct tda998x_priv *priv, uint8_t bit, uint16_t addr,
 		 uint8_t *buf, size_t size)
 {
-	buf[PB(0)] = tda998x_cksum(buf, size);
-
 	reg_clear(priv, REG_DIP_IF_FLAGS, bit);
 	reg_write_range(priv, addr, buf, size);
 	reg_set(priv, REG_DIP_IF_FLAGS, bit);
@@ -627,6 +625,8 @@ tda998x_write_aif(struct tda998x_priv *priv, struct tda998x_encoder_params *p)
 	buf[PB(4)] = p->audio_frame[4];
 	buf[PB(5)] = p->audio_frame[5] & 0xf8; /* DM_INH + LSV */

+	buf[PB(0)] = tda998x_cksum(buf, sizeof(buf));
+
 	tda998x_write_if(priv, DIP_IF_FLAGS_IF4, REG_IF4_HB0, buf,
 			 sizeof(buf));
 }
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 542fac628b28..fd1de451c8c6 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -826,6 +826,7 @@ struct intel_context {
 	struct kref ref;
 	int user_handle;
 	uint8_t remap_slice;
+	struct drm_i915_private *i915;
 	struct drm_i915_file_private *file_priv;
 	struct i915_ctx_hang_stats hang_stats;
 	struct i915_hw_ppgtt *ppgtt;
@@ -2036,8 +2037,6 @@ struct drm_i915_gem_object {
 	unsigned int cache_level:3;
 	unsigned int cache_dirty:1;

-	unsigned int has_dma_mapping:1;
-
 	unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS;

 	unsigned int pin_display;
@@ -3116,7 +3115,8 @@ void i915_debugfs_cleanup(struct drm_minor *minor);
 int i915_debugfs_connector_add(struct drm_connector *connector);
 void intel_display_crc_init(struct drm_device *dev);
 #else
-static inline int i915_debugfs_connector_add(struct drm_connector *connector) {}
+static inline int i915_debugfs_connector_add(struct drm_connector *connector)
+{ return 0; }
 static inline void intel_display_crc_init(struct drm_device *dev) {}
 #endif

@@ -3303,15 +3303,14 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
 #define I915_READ64(reg)	dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)

 #define I915_READ64_2x32(lower_reg, upper_reg) ({	\
-	u32 upper = I915_READ(upper_reg);		\
-	u32 lower = I915_READ(lower_reg);		\
-	u32 tmp = I915_READ(upper_reg);			\
-	if (upper != tmp) {				\
-		upper = tmp;				\
-		lower = I915_READ(lower_reg);		\
-		WARN_ON(I915_READ(upper_reg) != upper);	\
-	}						\
-	(u64)upper << 32 | lower; })
+	u32 upper, lower, tmp;				\
+	tmp = I915_READ(upper_reg);			\
+	do {						\
+		upper = tmp;				\
+		lower = I915_READ(lower_reg);		\
+		tmp = I915_READ(upper_reg);		\
+	} while (upper != tmp);				\
+	(u64)upper << 32 | lower; })

 #define POSTING_READ(reg)	(void)I915_READ_NOTRACE(reg)
 #define POSTING_READ16(reg)	(void)I915_READ16_NOTRACE(reg)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 248fd1ac7b3a..52b446b27b4d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -213,7 +213,6 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 	sg_dma_len(sg) = obj->base.size;

 	obj->pages = st;
-	obj->has_dma_mapping = true;
 	return 0;
 }

@@ -265,8 +264,6 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)

 	sg_free_table(obj->pages);
 	kfree(obj->pages);
-
-	obj->has_dma_mapping = false;
 }

 static void
@@ -2139,6 +2136,8 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
 		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
 	}

+	i915_gem_gtt_finish_object(obj);
+
 	if (i915_gem_object_needs_bit17_swizzle(obj))
 		i915_gem_object_save_bit_17_swizzle(obj);

@@ -2199,6 +2198,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	struct sg_page_iter sg_iter;
 	struct page *page;
 	unsigned long last_pfn = 0;	/* suppress gcc warning */
+	int ret;
 	gfp_t gfp;

 	/* Assert that the object is not currently in any GPU domain. As it
@@ -2246,8 +2246,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 			 */
 			i915_gem_shrink_all(dev_priv);
 			page = shmem_read_mapping_page(mapping, i);
-			if (IS_ERR(page))
+			if (IS_ERR(page)) {
+				ret = PTR_ERR(page);
 				goto err_pages;
+			}
 		}
 #ifdef CONFIG_SWIOTLB
 	if (swiotlb_nr_tbl()) {
@@ -2276,6 +2278,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	sg_mark_end(sg);
 	obj->pages = st;

+	ret = i915_gem_gtt_prepare_object(obj);
+	if (ret)
+		goto err_pages;
+
 	if (i915_gem_object_needs_bit17_swizzle(obj))
 		i915_gem_object_do_bit_17_swizzle(obj);

@@ -2300,10 +2306,10 @@ err_pages:
 	 * space and so want to translate the error from shmemfs back to our
 	 * usual understanding of ENOMEM.
 	 */
-	if (PTR_ERR(page) == -ENOSPC)
-		return -ENOMEM;
-	else
-		return PTR_ERR(page);
+	if (ret == -ENOSPC)
+		ret = -ENOMEM;
+
+	return ret;
 }

 /* Ensure that the associated pages are gathered from the backing storage
@@ -2542,6 +2548,7 @@ int __i915_add_request(struct intel_engine_cs *ring,
 	}

 	request->emitted_jiffies = jiffies;
+	ring->last_submitted_seqno = request->seqno;
 	list_add_tail(&request->list, &ring->request_list);
 	request->file_priv = NULL;

@@ -3247,10 +3254,8 @@ int i915_vma_unbind(struct i915_vma *vma)

 	/* Since the unbound list is global, only move to that list if
 	 * no more VMAs exist. */
-	if (list_empty(&obj->vma_list)) {
-		i915_gem_gtt_finish_object(obj);
+	if (list_empty(&obj->vma_list))
 		list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
-	}

 	/* And finally now the object is completely decoupled from this vma,
 	 * we can drop its hold on the backing storage and allow it to be
@@ -3768,22 +3773,16 @@ search_free:
 		goto err_remove_node;
 	}

-	ret = i915_gem_gtt_prepare_object(obj);
-	if (ret)
-		goto err_remove_node;
-
 	trace_i915_vma_bind(vma, flags);
 	ret = i915_vma_bind(vma, obj->cache_level, flags);
 	if (ret)
-		goto err_finish_gtt;
+		goto err_remove_node;

 	list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
 	list_add_tail(&vma->mm_list, &vm->inactive_list);

 	return vma;

-err_finish_gtt:
-	i915_gem_gtt_finish_object(obj);
 err_remove_node:
 	drm_mm_remove_node(&vma->node);
 err_free_vma:
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index d65cbe6afb92..48afa777e94a 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -135,8 +135,7 @@ static int get_context_size(struct drm_device *dev)

 void i915_gem_context_free(struct kref *ctx_ref)
 {
-	struct intel_context *ctx = container_of(ctx_ref,
-						 typeof(*ctx), ref);
+	struct intel_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);

 	trace_i915_context_free(ctx);

@@ -195,6 +194,7 @@ __create_hw_context(struct drm_device *dev,

 	kref_init(&ctx->ref);
 	list_add_tail(&ctx->link, &dev_priv->context_list);
+	ctx->i915 = dev_priv;

 	if (dev_priv->hw_context_size) {
 		struct drm_i915_gem_object *obj =
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 7998da27c500..e9c2bfd85b52 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -256,7 +256,6 @@ static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
 		return PTR_ERR(sg);

 	obj->pages = sg;
-	obj->has_dma_mapping = true;
 	return 0;
 }

@@ -264,7 +263,6 @@ static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
 {
 	dma_buf_unmap_attachment(obj->base.import_attach,
 				 obj->pages, DMA_BIDIRECTIONAL);
-	obj->has_dma_mapping = false;
 }

 static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index dcc6a88c560e..31e8269e6e3d 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1723,9 +1723,6 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)

 int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
 {
-	if (obj->has_dma_mapping)
-		return 0;
-
 	if (!dma_map_sg(&obj->base.dev->pdev->dev,
 			obj->pages->sgl, obj->pages->nents,
 			PCI_DMA_BIDIRECTIONAL))
@@ -1926,6 +1923,17 @@ static int ggtt_bind_vma(struct i915_vma *vma,
 		vma->vm->insert_entries(vma->vm, pages,
 					vma->node.start,
 					cache_level, pte_flags);
+
+		/* Note the inconsistency here is due to absence of the
+		 * aliasing ppgtt on gen4 and earlier. Though we always
+		 * request PIN_USER for execbuffer (translated to LOCAL_BIND),
+		 * without the appgtt, we cannot honour that request and so
+		 * must substitute it with a global binding. Since we do this
+		 * behind the upper layers back, we need to explicitly set
+		 * the bound flag ourselves.
+		 */
+		vma->bound |= GLOBAL_BIND;
+
 	}

 	if (dev_priv->mm.aliasing_ppgtt && flags & LOCAL_BIND) {
@@ -1972,10 +1980,8 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)

 	interruptible = do_idling(dev_priv);

-	if (!obj->has_dma_mapping)
-		dma_unmap_sg(&dev->pdev->dev,
-			     obj->pages->sgl, obj->pages->nents,
-			     PCI_DMA_BIDIRECTIONAL);
+	dma_unmap_sg(&dev->pdev->dev, obj->pages->sgl, obj->pages->nents,
+		     PCI_DMA_BIDIRECTIONAL);

 	undo_idling(dev_priv, interruptible);
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 348ed5abcdbf..8b5b784c62fe 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -416,7 +416,6 @@ _i915_gem_object_create_stolen(struct drm_device *dev,
 	if (obj->pages == NULL)
 		goto cleanup;

-	obj->has_dma_mapping = true;
 	i915_gem_object_pin_pages(obj);
 	obj->stolen = stolen;

diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index d61e74a08f82..d19c9db5e18c 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -183,18 +183,8 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
 	if (IS_GEN4(dev)) {
 		uint32_t ddc2 = I915_READ(DCC2);

-		if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE)) {
-			/* Since the swizzling may vary within an
-			 * object, we have no idea what the swizzling
-			 * is for any page in particular. Thus we
-			 * cannot migrate tiled pages using the GPU,
-			 * nor can we tell userspace what the exact
-			 * swizzling is for any object.
-			 */
+		if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE))
 			dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
-			swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
-			swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
-		}
 	}

 	if (dcc == 0xffffffff) {
@@ -474,7 +464,10 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
 	}

 	/* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
-	args->phys_swizzle_mode = args->swizzle_mode;
+	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
+		args->phys_swizzle_mode = I915_BIT_6_SWIZZLE_UNKNOWN;
+	else
+		args->phys_swizzle_mode = args->swizzle_mode;
 	if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
 		args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
 	if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 1f4e5a32a16e..8fd431bcdfd3 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -545,6 +545,26 @@ err:
 	return ret;
 }

+static int
+__i915_gem_userptr_set_pages(struct drm_i915_gem_object *obj,
+			     struct page **pvec, int num_pages)
+{
+	int ret;
+
+	ret = st_set_pages(&obj->pages, pvec, num_pages);
+	if (ret)
+		return ret;
+
+	ret = i915_gem_gtt_prepare_object(obj);
+	if (ret) {
+		sg_free_table(obj->pages);
+		kfree(obj->pages);
+		obj->pages = NULL;
+	}
+
+	return ret;
+}
+
 static void
 __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 {
@@ -584,9 +604,12 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 	if (obj->userptr.work != &work->work) {
 		ret = 0;
 	} else if (pinned == num_pages) {
-		ret = st_set_pages(&obj->pages, pvec, num_pages);
+		ret = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
 		if (ret == 0) {
 			list_add_tail(&obj->global_list, &to_i915(dev)->mm.unbound_list);
+			obj->get_page.sg = obj->pages->sgl;
+			obj->get_page.last = 0;
+
 			pinned = 0;
 		}
 	}
@@ -693,7 +716,7 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 			}
 		}
 	} else {
-		ret = st_set_pages(&obj->pages, pvec, num_pages);
+		ret = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
 		if (ret == 0) {
 			obj->userptr.work = NULL;
 			pinned = 0;
@@ -715,6 +738,8 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
 	if (obj->madv != I915_MADV_WILLNEED)
 		obj->dirty = 0;

+	i915_gem_gtt_finish_object(obj);
+
 	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
 		struct page *page = sg_page_iter_page(&sg_iter);

diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
index 176de6322e4d..23aa04cded6b 100644
--- a/drivers/gpu/drm/i915/i915_ioc32.c
+++ b/drivers/gpu/drm/i915/i915_ioc32.c
@@ -204,7 +204,7 @@ long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 	drm_ioctl_compat_t *fn = NULL;
 	int ret;

-	if (nr < DRM_COMMAND_BASE)
+	if (nr < DRM_COMMAND_BASE || nr >= DRM_COMMAND_END)
 		return drm_compat_ioctl(filp, cmd, arg);

 	if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls))
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index e6bb72dca3ff..984e2fe6688c 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2706,18 +2706,11 @@ static void gen8_disable_vblank(struct drm_device *dev, int pipe)
 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }

-static struct drm_i915_gem_request *
-ring_last_request(struct intel_engine_cs *ring)
-{
-	return list_entry(ring->request_list.prev,
-			  struct drm_i915_gem_request, list);
-}
-
 static bool
-ring_idle(struct intel_engine_cs *ring)
+ring_idle(struct intel_engine_cs *ring, u32 seqno)
 {
 	return (list_empty(&ring->request_list) ||
-		i915_gem_request_completed(ring_last_request(ring), false));
+		i915_seqno_passed(seqno, ring->last_submitted_seqno));
 }

 static bool
@@ -2939,7 +2932,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
 		acthd = intel_ring_get_active_head(ring);

 		if (ring->hangcheck.seqno == seqno) {
-			if (ring_idle(ring)) {
+			if (ring_idle(ring, seqno)) {
 				ring->hangcheck.action = HANGCHECK_IDLE;

 				if (waitqueue_active(&ring->irq_queue)) {
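Taken together with the last_submitted_seqno field added to struct intel_engine_cs and set in __i915_add_request, this lets the hang checker test for idleness without walking request_list or taking the lock that protects it: the ring is idle once the seqno the hardware has completed has passed the last seqno ever submitted. A rough stand-alone illustration of the idea, with all names invented for the sketch:

    #include <stdbool.h>
    #include <stdint.h>

    /* i915-style seqno comparison that tolerates 32-bit wrap-around. */
    static bool seqno_passed(uint32_t completed, uint32_t target)
    {
            return (int32_t)(completed - target) >= 0;
    }

    struct ring {
            uint32_t last_submitted_seqno;  /* written by the submission path */
    };

    /* Lock-free idleness test: idle once everything ever submitted is done. */
    static bool ring_is_idle(const struct ring *ring, uint32_t completed_seqno)
    {
            return seqno_passed(completed_seqno, ring->last_submitted_seqno);
    }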
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 497cba5deb1e..849a2590e010 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -727,7 +727,7 @@ DECLARE_EVENT_CLASS(i915_context,
 	    TP_fast_assign(
 			   __entry->ctx = ctx;
 			   __entry->vm = ctx->ppgtt ? &ctx->ppgtt->base : NULL;
-			   __entry->dev = ctx->file_priv->dev_priv->dev->primary->index;
+			   __entry->dev = ctx->i915->dev->primary->index;
 	    ),

 	    TP_printk("dev=%u, ctx=%p, ctx_vm=%p",
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index 7ed8033aae60..8e35e0d013df 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -129,8 +129,9 @@ int intel_atomic_commit(struct drm_device *dev,
 			struct drm_atomic_state *state,
 			bool async)
 {
-	int ret;
-	int i;
+	struct drm_crtc_state *crtc_state;
+	struct drm_crtc *crtc;
+	int ret, i;

 	if (async) {
 		DRM_DEBUG_KMS("i915 does not yet support async commit\n");
@@ -142,48 +143,18 @@ int intel_atomic_commit(struct drm_device *dev,
 		return ret;

 	/* Point of no return */
-
-	/*
-	 * FIXME: The proper sequence here will eventually be:
-	 *
-	 * drm_atomic_helper_swap_state(dev, state)
-	 * drm_atomic_helper_commit_modeset_disables(dev, state);
-	 * drm_atomic_helper_commit_planes(dev, state);
-	 * drm_atomic_helper_commit_modeset_enables(dev, state);
-	 * drm_atomic_helper_wait_for_vblanks(dev, state);
-	 * drm_atomic_helper_cleanup_planes(dev, state);
-	 * drm_atomic_state_free(state);
-	 *
-	 * once we have full atomic modeset. For now, just manually update
-	 * plane states to avoid clobbering good states with dummy states
-	 * while nuclear pageflipping.
-	 */
-	for (i = 0; i < dev->mode_config.num_total_plane; i++) {
-		struct drm_plane *plane = state->planes[i];
-
-		if (!plane)
-			continue;
-
-		plane->state->state = state;
-		swap(state->plane_states[i], plane->state);
-		plane->state->state = NULL;
-	}
+	drm_atomic_helper_swap_state(dev, state);

 	/* swap crtc_scaler_state */
-	for (i = 0; i < dev->mode_config.num_crtc; i++) {
-		struct drm_crtc *crtc = state->crtcs[i];
-		if (!crtc) {
-			continue;
-		}
-
-		to_intel_crtc(crtc)->config->scaler_state =
-			to_intel_crtc_state(state->crtc_states[i])->scaler_state;
+	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+		to_intel_crtc(crtc)->config = to_intel_crtc_state(crtc->state);

 		if (INTEL_INFO(dev)->gen >= 9)
 			skl_detach_scalers(to_intel_crtc(crtc));
+
+		drm_atomic_helper_commit_planes_on_crtc(crtc_state);
 	}

-	drm_atomic_helper_commit_planes(dev, state);
 	drm_atomic_helper_wait_for_vblanks(dev, state);
 	drm_atomic_helper_cleanup_planes(dev, state);
 	drm_atomic_state_free(state);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 647b1404c441..87476ff181dd 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -6315,9 +6315,6 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
 	struct drm_connector *connector;
 	struct drm_i915_private *dev_priv = dev->dev_private;

-	/* crtc should still be enabled when we disable it. */
-	WARN_ON(!crtc->state->enable);
-
 	intel_crtc_disable_planes(crtc);
 	dev_priv->display.crtc_disable(crtc);
 	dev_priv->display.off(crtc);
@@ -11829,7 +11826,9 @@ encoder_retry:
 		goto encoder_retry;
 	}

-	pipe_config->dither = pipe_config->pipe_bpp != base_bpp;
+	/* Dithering seems to not pass-through bits correctly when it should, so
+	 * only enable it on 6bpc panels. */
+	pipe_config->dither = pipe_config->pipe_bpp == 6*3;
 	DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n",
 		      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);

@@ -12591,7 +12590,8 @@ static int __intel_set_mode(struct drm_crtc *modeset_crtc,
 			continue;

 		if (!crtc_state->enable) {
-			intel_crtc_disable(crtc);
+			if (crtc->state->enable)
+				intel_crtc_disable(crtc);
 		} else if (crtc->state->enable) {
 			intel_crtc_disable_planes(crtc);
 			dev_priv->display.crtc_disable(crtc);
@@ -12626,17 +12626,17 @@ static int __intel_set_mode(struct drm_crtc *modeset_crtc,

 	modeset_update_crtc_power_domains(state);

-	drm_atomic_helper_commit_planes(dev, state);
-
 	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
-		if (!needs_modeset(crtc->state) || !crtc->state->enable)
+		if (!needs_modeset(crtc->state) || !crtc->state->enable) {
+			drm_atomic_helper_commit_planes_on_crtc(crtc_state);
 			continue;
+		}

 		update_scanline_offset(to_intel_crtc(crtc));

 		dev_priv->display.crtc_enable(crtc);
-		intel_crtc_enable_planes(crtc);
+		drm_atomic_helper_commit_planes_on_crtc(crtc_state);
 	}

 	/* FIXME: add subpixel order */
@@ -12893,20 +12893,11 @@ intel_modeset_stage_output_state(struct drm_device *dev,
 	return 0;
 }

-static bool primary_plane_visible(struct drm_crtc *crtc)
-{
-	struct intel_plane_state *plane_state =
-		to_intel_plane_state(crtc->primary->state);
-
-	return plane_state->visible;
-}
-
 static int intel_crtc_set_config(struct drm_mode_set *set)
 {
 	struct drm_device *dev;
 	struct drm_atomic_state *state = NULL;
 	struct intel_crtc_state *pipe_config;
-	bool primary_plane_was_visible;
 	int ret;

 	BUG_ON(!set);
@@ -12945,38 +12936,8 @@ static int intel_crtc_set_config(struct drm_mode_set *set)

 	intel_update_pipe_size(to_intel_crtc(set->crtc));

-	primary_plane_was_visible = primary_plane_visible(set->crtc);
-
 	ret = intel_set_mode_with_config(set->crtc, pipe_config, true);

-	if (ret == 0 &&
-	    pipe_config->base.enable &&
-	    pipe_config->base.planes_changed &&
-	    !needs_modeset(&pipe_config->base)) {
-		struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc);
-
-		/*
-		 * We need to make sure the primary plane is re-enabled if it
-		 * has previously been turned off.
-		 */
-		if (ret == 0 && !primary_plane_was_visible &&
-		    primary_plane_visible(set->crtc)) {
-			WARN_ON(!intel_crtc->active);
-			intel_post_enable_primary(set->crtc);
-		}
-
-		/*
-		 * In the fastboot case this may be our only check of the
-		 * state after boot. It would be better to only do it on
-		 * the first update, but we don't have a nice way of doing that
-		 * (and really, set_config isn't used much for high freq page
-		 * flipping, so increasing its cost here shouldn't be a big
-		 * deal).
-		 */
-		if (i915.fastboot && ret == 0)
-			intel_modeset_check_state(set->crtc->dev);
-	}
-
 	if (ret) {
 		DRM_DEBUG_KMS("failed to set mode on [CRTC:%d], err = %d\n",
 			      set->crtc->base.id, ret);
@@ -13276,7 +13237,7 @@ intel_check_primary_plane(struct drm_plane *plane,
 	if (ret)
 		return ret;

-	if (intel_crtc->active) {
+	if (crtc_state ? crtc_state->base.active : intel_crtc->active) {
 		struct intel_plane_state *old_state =
 			to_intel_plane_state(plane->state);

@@ -13307,6 +13268,9 @@ intel_check_primary_plane(struct drm_plane *plane,
 		 */
 		if (IS_BROADWELL(dev))
 			intel_crtc->atomic.wait_vblank = true;
+
+		if (crtc_state)
+			intel_crtc->atomic.post_enable_primary = true;
 	}

 	/*
@@ -13319,6 +13283,10 @@ intel_check_primary_plane(struct drm_plane *plane,
 	if (!state->visible || !fb)
 		intel_crtc->atomic.disable_ips = true;

+	if (!state->visible && old_state->visible &&
+	    crtc_state && !needs_modeset(&crtc_state->base))
+		intel_crtc->atomic.pre_disable_primary = true;
+
 	intel_crtc->atomic.fb_bits |=
 		INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);

@@ -15036,6 +15004,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
 		struct intel_plane_state *plane_state;

 		memset(crtc->config, 0, sizeof(*crtc->config));
+		crtc->config->base.crtc = &crtc->base;

 		crtc->config->quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE;

diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 6e8faa253792..1df0e1fe235f 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -93,9 +93,6 @@ static const struct dp_link_dpll chv_dpll[] = {

 static const int skl_rates[] = { 162000, 216000, 270000,
 				  324000, 432000, 540000 };
-static const int chv_rates[] = { 162000, 202500, 210000, 216000,
-				 243000, 270000, 324000, 405000,
-				 420000, 432000, 540000 };
 static const int default_rates[] = { 162000, 270000, 540000 };

 /**
@@ -1169,24 +1166,31 @@ intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
 	return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
 }

+static bool intel_dp_source_supports_hbr2(struct drm_device *dev)
+{
+	/* WaDisableHBR2:skl */
+	if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
+		return false;
+
+	if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
+	    (INTEL_INFO(dev)->gen >= 9))
+		return true;
+	else
+		return false;
+}
+
 static int
 intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
 {
 	if (IS_SKYLAKE(dev)) {
 		*source_rates = skl_rates;
 		return ARRAY_SIZE(skl_rates);
-	} else if (IS_CHERRYVIEW(dev)) {
-		*source_rates = chv_rates;
-		return ARRAY_SIZE(chv_rates);
 	}

 	*source_rates = default_rates;

-	if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
-		/* WaDisableHBR2:skl */
-		return (DP_LINK_BW_2_7 >> 3) + 1;
-	else if (INTEL_INFO(dev)->gen >= 8 ||
-		 (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
+	/* This depends on the fact that 5.4 is last value in the array */
+	if (intel_dp_source_supports_hbr2(dev))
 		return (DP_LINK_BW_5_4 >> 3) + 1;
 	else
 		return (DP_LINK_BW_2_7 >> 3) + 1;
@@ -3941,10 +3945,15 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
 		}
 	}

-	/* Training Pattern 3 support, both source and sink */
+	/* Training Pattern 3 support, Intel platforms that support HBR2 alone
+	 * have support for TP3 hence that check is used along with dpcd check
+	 * to ensure TP3 can be enabled.
+	 * SKL < B0: due it's WaDisableHBR2 is the only exception where TP3 is
+	 * supported but still not enabled.
+	 */
 	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
 	    intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
-	    (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
+	    intel_dp_source_supports_hbr2(dev)) {
 		intel_dp->use_tps3 = true;
 		DRM_DEBUG_KMS("Displayport TPS3 supported\n");
 	} else
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 6e4cc5334f47..600afdbef8c9 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -357,6 +357,16 @@ intel_dp_mst_mode_valid(struct drm_connector *connector,
 	return MODE_OK;
 }

+static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *connector,
+							  struct drm_connector_state *state)
+{
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct intel_dp *intel_dp = intel_connector->mst_port;
+	struct intel_crtc *crtc = to_intel_crtc(state->crtc);
+
+	return &intel_dp->mst_encoders[crtc->pipe]->base.base;
+}
+
 static struct drm_encoder *intel_mst_best_encoder(struct drm_connector *connector)
 {
 	struct intel_connector *intel_connector = to_intel_connector(connector);
@@ -367,6 +377,7 @@ static struct drm_encoder *intel_mst_best_encoder(struct drm_connector *connecto
 static const struct drm_connector_helper_funcs intel_dp_mst_connector_helper_funcs = {
 	.get_modes = intel_dp_mst_get_modes,
 	.mode_valid = intel_dp_mst_mode_valid,
+	.atomic_best_encoder = intel_mst_atomic_best_encoder,
 	.best_encoder = intel_mst_best_encoder,
 };

diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 9b74ffae5f5a..7f2161a1ff5d 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1012,6 +1012,8 @@ static int intel_lr_context_pin(struct intel_engine_cs *ring,
 		ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf);
 		if (ret)
 			goto unpin_ctx_obj;
+
+		ctx_obj->dirty = true;
 	}

 	return ret;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index e539314ae87e..4be66f60504d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -275,6 +275,13 @@ struct intel_engine_cs {
 	 * Do we have some not yet emitted requests outstanding?
 	 */
 	struct drm_i915_gem_request *outstanding_lazy_request;
+	/**
+	 * Seqno of request most recently submitted to request_list.
+	 * Used exclusively by hang checker to avoid grabbing lock while
+	 * inspecting request list.
+	 */
+	u32 last_submitted_seqno;
+
 	bool gpu_caches_dirty;

 	wait_queue_head_t irq_queue;
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index a6d8a3ee7750..260389acfb77 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1274,10 +1274,12 @@ int i915_reg_read_ioctl(struct drm_device *dev,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_reg_read *reg = data;
 	struct register_whitelist const *entry = whitelist;
+	unsigned size;
+	u64 offset;
 	int i, ret = 0;

 	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
-		if (entry->offset == reg->offset &&
+		if (entry->offset == (reg->offset & -entry->size) &&
 		    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
 			break;
 	}
@@ -1285,23 +1287,33 @@ int i915_reg_read_ioctl(struct drm_device *dev,
 	if (i == ARRAY_SIZE(whitelist))
 		return -EINVAL;

+	/* We use the low bits to encode extra flags as the register should
+	 * be naturally aligned (and those that are not so aligned merely
+	 * limit the available flags for that register).
+	 */
+	offset = entry->offset;
+	size = entry->size;
+	size |= reg->offset ^ offset;
+
 	intel_runtime_pm_get(dev_priv);

-	switch (entry->size) {
+	switch (size) {
+	case 8 | 1:
+		reg->val = I915_READ64_2x32(offset, offset+4);
+		break;
 	case 8:
-		reg->val = I915_READ64(reg->offset);
+		reg->val = I915_READ64(offset);
 		break;
 	case 4:
-		reg->val = I915_READ(reg->offset);
+		reg->val = I915_READ(offset);
 		break;
 	case 2:
-		reg->val = I915_READ16(reg->offset);
+		reg->val = I915_READ16(offset);
 		break;
 	case 1:
-		reg->val = I915_READ8(reg->offset);
+		reg->val = I915_READ8(offset);
 		break;
 	default:
-		MISSING_CASE(entry->size);
 		ret = -EINVAL;
 		goto out;
 	}
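The i915_reg_read_ioctl change above relies on whitelisted registers being naturally aligned, so any low bits in the user-supplied offset can never be part of a valid address and are free to act as extra flags: after masking the offset back to its aligned value, the stripped bits are folded into the size and drive the switch (for example, `8 | 1` selects the two-halves 64-bit read). A small sketch of just that encoding, with the structure and function names simplified for illustration:

    #include <stdint.h>

    struct whitelist_entry {
            uint64_t offset;        /* naturally aligned register offset */
            uint32_t size;          /* 1, 2, 4 or 8 bytes, a power of two */
    };

    /* The user offset matches an entry when it equals the entry's offset
     * once the low alignment bits are masked off. */
    static int entry_matches(const struct whitelist_entry *e, uint64_t user_offset)
    {
            return e->offset == (user_offset & ~(uint64_t)(e->size - 1));
    }

    /* Fold the caller's low bits into the size so they select a variant
     * of the read (e.g. size 8 with flag 1 -> read as two 32-bit halves). */
    static uint32_t decode_size_and_flags(const struct whitelist_entry *e,
                                          uint64_t user_offset)
    {
            return e->size | (uint32_t)(user_offset ^ e->offset);
    }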
diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
index 214eceefc981..e671ad369416 100644
--- a/drivers/gpu/drm/imx/imx-tve.c
+++ b/drivers/gpu/drm/imx/imx-tve.c
@@ -301,7 +301,7 @@ static void imx_tve_encoder_prepare(struct drm_encoder *encoder)

 	switch (tve->mode) {
 	case TVE_MODE_VGA:
-		imx_drm_set_bus_format_pins(encoder, MEDIA_BUS_FMT_YUV8_1X24,
+		imx_drm_set_bus_format_pins(encoder, MEDIA_BUS_FMT_GBR888_1X24,
 					    tve->hsync_pin, tve->vsync_pin);
 		break;
 	case TVE_MODE_TVOUT:
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index 74a9ce40ddc4..b4deb9cf9d71 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -21,6 +21,7 @@
 #include <drm/drm_panel.h>
 #include <linux/videodev2.h>
 #include <video/of_display_timing.h>
+#include <linux/of_graph.h>

 #include "imx-drm.h"

@@ -208,7 +209,7 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
 {
 	struct drm_device *drm = data;
 	struct device_node *np = dev->of_node;
-	struct device_node *panel_node;
+	struct device_node *port;
 	const u8 *edidp;
 	struct imx_parallel_display *imxpd;
 	int ret;
@@ -234,11 +235,19 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
 		imxpd->bus_format = MEDIA_BUS_FMT_RGB666_1X24_CPADHI;
 	}

-	panel_node = of_parse_phandle(np, "fsl,panel", 0);
-	if (panel_node) {
-		imxpd->panel = of_drm_find_panel(panel_node);
-		if (!imxpd->panel)
-			return -EPROBE_DEFER;
+	/* port@1 is the output port */
+	port = of_graph_get_port_by_id(np, 1);
+	if (port) {
+		struct device_node *endpoint, *remote;
+
+		endpoint = of_get_child_by_name(port, "endpoint");
+		if (endpoint) {
+			remote = of_graph_get_remote_port_parent(endpoint);
+			if (remote)
+				imxpd->panel = of_drm_find_panel(remote);
+			if (!imxpd->panel)
+				return -EPROBE_DEFER;
+		}
 	}

 	imxpd->dev = dev;
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
index 0d1dbb737933..247a424445f7 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
@@ -220,13 +220,15 @@ static int mdp4_plane_mode_set(struct drm_plane *plane,
 	uint32_t op_mode = 0;
 	uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT;
 	uint32_t phasey_step = MDP4_VG_PHASE_STEP_DEFAULT;
-	enum mdp4_frame_format frame_type = mdp4_get_frame_format(fb);
+	enum mdp4_frame_format frame_type;

 	if (!(crtc && fb)) {
 		DBG("%s: disabled!", mdp4_plane->name);
 		return 0;
 	}

+	frame_type = mdp4_get_frame_format(fb);
+
 	/* src values are in Q16 fixed point, convert to integer: */
 	src_x = src_x >> 16;
 	src_y = src_y >> 16;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index 206f758f7d64..e253db5de5aa 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -76,7 +76,20 @@ static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *st

 static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state)
 {
+	int i;
 	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+	int nplanes = mdp5_kms->dev->mode_config.num_total_plane;
+
+	for (i = 0; i < nplanes; i++) {
+		struct drm_plane *plane = state->planes[i];
+		struct drm_plane_state *plane_state = state->plane_states[i];
+
+		if (!plane)
+			continue;
+
+		mdp5_plane_complete_commit(plane, plane_state);
+	}
+
 	mdp5_disable(mdp5_kms);
 }

diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index e0eb24587c84..e79ac09b7216 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -227,6 +227,8 @@ void mdp5_plane_install_properties(struct drm_plane *plane,
 		struct drm_mode_object *obj);
 uint32_t mdp5_plane_get_flush(struct drm_plane *plane);
 void mdp5_plane_complete_flip(struct drm_plane *plane);
+void mdp5_plane_complete_commit(struct drm_plane *plane,
+	struct drm_plane_state *state);
 enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane);
 struct drm_plane *mdp5_plane_init(struct drm_device *dev,
 		enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index 57b8f56ae9d0..22275568ab8b 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -31,8 +31,6 @@ struct mdp5_plane {

 	uint32_t nformats;
 	uint32_t formats[32];
-
-	bool enabled;
 };
 #define to_mdp5_plane(x) container_of(x, struct mdp5_plane, base)

@@ -56,22 +54,6 @@ static bool plane_enabled(struct drm_plane_state *state)
 	return state->fb && state->crtc;
 }

-static int mdp5_plane_disable(struct drm_plane *plane)
-{
-	struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
-	struct mdp5_kms *mdp5_kms = get_kms(plane);
-	enum mdp5_pipe pipe = mdp5_plane->pipe;
-
-	DBG("%s: disable", mdp5_plane->name);
-
-	if (mdp5_kms) {
-		/* Release the memory we requested earlier from the SMP: */
-		mdp5_smp_release(mdp5_kms->smp, pipe);
-	}
-
-	return 0;
-}
-
 static void mdp5_plane_destroy(struct drm_plane *plane)
 {
 	struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
@@ -224,7 +206,6 @@ static void mdp5_plane_atomic_update(struct drm_plane *plane,

 	if (!plane_enabled(state)) {
 		to_mdp5_plane_state(state)->pending = true;
-		mdp5_plane_disable(plane);
 	} else if (to_mdp5_plane_state(state)->mode_changed) {
 		int ret;
 		to_mdp5_plane_state(state)->pending = true;
@@ -602,6 +583,20 @@ uint32_t mdp5_plane_get_flush(struct drm_plane *plane)
 	return mdp5_plane->flush_mask;
 }

+/* called after vsync in thread context */
+void mdp5_plane_complete_commit(struct drm_plane *plane,
+	struct drm_plane_state *state)
+{
+	struct mdp5_kms *mdp5_kms = get_kms(plane);
+	struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+	enum mdp5_pipe pipe = mdp5_plane->pipe;
+
+	if (!plane_enabled(plane->state)) {
+		DBG("%s: free SMP", mdp5_plane->name);
+		mdp5_smp_release(mdp5_kms->smp, pipe);
+	}
+}
+
 /* initialize plane */
 struct drm_plane *mdp5_plane_init(struct drm_device *dev,
 		enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset)
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
index 16702aecf0df..64a27d86f2f5 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
@@ -34,22 +34,44 @@
  * and CANNOT be re-allocated (eg: MMB0 and MMB1 both tied to RGB0).
  *
  * For each block that can be dynamically allocated, it can be either
- *     free, or pending/in-use by a client. The updates happen in three steps:
+ *     free:
+ *     The block is free.
+ *
+ *     pending:
+ *     The block is allocated to some client and not free.
+ *
+ *     configured:
+ *     The block is allocated to some client, and assigned to that
+ *     client in MDP5_MDP_SMP_ALLOC registers.
+ *
+ *     inuse:
+ *     The block is being actively used by a client.
+ *
+ * The updates happen in the following steps:
  *
  *  1) mdp5_smp_request():
  *     When plane scanout is setup, calculate required number of
- *     blocks needed per client, and request. Blocks not inuse or
- *     pending by any other client are added to client's pending
- *     set.
+ *     blocks needed per client, and request. Blocks neither inuse nor
+ *     configured nor pending by any other client are added to client's
+ *     pending set.
+ *     For shrinking, blocks in pending but not in configured can be freed
+ *     directly, but those already in configured will be freed later by
+ *     mdp5_smp_commit.
  *
  *  2) mdp5_smp_configure():
  *     As hw is programmed, before FLUSH, MDP5_MDP_SMP_ALLOC registers
  *     are configured for the union(pending, inuse)
+ *     Current pending is copied to configured.
+ *     It is assumed that mdp5_smp_request and mdp5_smp_configure not run
+ *     concurrently for the same pipe.
  *
  *  3) mdp5_smp_commit():
- *     After next vblank, copy pending -> inuse. Optionally update
+ *     After next vblank, copy configured -> inuse. Optionally update
  *     MDP5_SMP_ALLOC registers if there are newly unused blocks
  *
+ *  4) mdp5_smp_release():
+ *     Must be called after the pipe is disabled and no longer uses any SMB
+ *
  * On the next vblank after changes have been committed to hw, the
  * client's pending blocks become it's in-use blocks (and no-longer
  * in-use blocks become available to other clients).
@@ -77,6 +99,9 @@ struct mdp5_smp {
 	struct mdp5_client_smp_state client_state[MAX_CLIENTS];
 };

+static void update_smp_state(struct mdp5_smp *smp,
+		u32 cid, mdp5_smp_state_t *assigned);
+
 static inline
 struct mdp5_kms *get_kms(struct mdp5_smp *smp)
 {
@@ -149,7 +174,12 @@ static int smp_request_block(struct mdp5_smp *smp,
 		for (i = cur_nblks; i > nblks; i--) {
 			int blk = find_first_bit(ps->pending, cnt);
 			clear_bit(blk, ps->pending);
-			/* don't clear in global smp_state until _commit() */
+
+			/* clear in global smp_state if not in configured
+			 * otherwise until _commit()
+			 */
+			if (!test_bit(blk, ps->configured))
+				clear_bit(blk, smp->state);
 		}
 	}

@@ -223,10 +253,33 @@ int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 wid
 /* Release SMP blocks for all clients of the pipe */
 void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe)
 {
-	int i, nblks;
+	int i;
+	unsigned long flags;
+	int cnt = smp->blk_cnt;
+
+	for (i = 0; i < pipe2nclients(pipe); i++) {
+		mdp5_smp_state_t assigned;
+		u32 cid = pipe2client(pipe, i);
+		struct mdp5_client_smp_state *ps = &smp->client_state[cid];
+
+		spin_lock_irqsave(&smp->state_lock, flags);
+
+		/* clear hw assignment */
+		bitmap_or(assigned, ps->inuse, ps->configured, cnt);
+		update_smp_state(smp, CID_UNUSED, &assigned);
+
+		/* free to global pool */
+		bitmap_andnot(smp->state, smp->state, ps->pending, cnt);
+		bitmap_andnot(smp->state, smp->state, assigned, cnt);
+
+		/* clear client's infor */
+		bitmap_zero(ps->pending, cnt);
+		bitmap_zero(ps->configured, cnt);
+		bitmap_zero(ps->inuse, cnt);
+
+		spin_unlock_irqrestore(&smp->state_lock, flags);
+	}

-	for (i = 0, nblks = 0; i < pipe2nclients(pipe); i++)
-		smp_request_block(smp, pipe2client(pipe, i), 0);
 	set_fifo_thresholds(smp, pipe, 0);
 }
232 285
@@ -274,12 +327,20 @@ void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe)
274 u32 cid = pipe2client(pipe, i); 327 u32 cid = pipe2client(pipe, i);
275 struct mdp5_client_smp_state *ps = &smp->client_state[cid]; 328 struct mdp5_client_smp_state *ps = &smp->client_state[cid];
276 329
277 bitmap_or(assigned, ps->inuse, ps->pending, cnt); 330 /*
 331 * if a vblank has not happened since the last smp_configure,
 332 * skip this configure for now
333 */
334 if (!bitmap_equal(ps->inuse, ps->configured, cnt))
335 continue;
336
337 bitmap_copy(ps->configured, ps->pending, cnt);
338 bitmap_or(assigned, ps->inuse, ps->configured, cnt);
278 update_smp_state(smp, cid, &assigned); 339 update_smp_state(smp, cid, &assigned);
279 } 340 }
280} 341}
281 342
282/* step #3: after vblank, copy pending -> inuse: */ 343/* step #3: after vblank, copy configured -> inuse: */
283void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe) 344void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
284{ 345{
285 int cnt = smp->blk_cnt; 346 int cnt = smp->blk_cnt;
@@ -295,7 +356,7 @@ void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
295 * using, which can be released and made available to other 356 * using, which can be released and made available to other
296 * clients: 357 * clients:
297 */ 358 */
298 if (bitmap_andnot(released, ps->inuse, ps->pending, cnt)) { 359 if (bitmap_andnot(released, ps->inuse, ps->configured, cnt)) {
299 unsigned long flags; 360 unsigned long flags;
300 361
301 spin_lock_irqsave(&smp->state_lock, flags); 362 spin_lock_irqsave(&smp->state_lock, flags);
@@ -306,7 +367,7 @@ void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
306 update_smp_state(smp, CID_UNUSED, &released); 367 update_smp_state(smp, CID_UNUSED, &released);
307 } 368 }
308 369
309 bitmap_copy(ps->inuse, ps->pending, cnt); 370 bitmap_copy(ps->inuse, ps->configured, cnt);
310 } 371 }
311} 372}
312 373
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
index e47179f63585..5b6c2363f592 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
@@ -23,6 +23,7 @@
23 23
24struct mdp5_client_smp_state { 24struct mdp5_client_smp_state {
25 mdp5_smp_state_t inuse; 25 mdp5_smp_state_t inuse;
26 mdp5_smp_state_t configured;
26 mdp5_smp_state_t pending; 27 mdp5_smp_state_t pending;
27}; 28};
28 29
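
To make the SMP rework above easier to follow, here is a minimal, self-contained model of the pending -> configured -> inuse lifecycle that the new configured bitmap introduces. It is a sketch only: the 32-bit masks, the helper names and the simple free pool stand in for the driver's DECLARE_BITMAP arrays, spinlock and per-client bookkeeping, and are not the real mdp5 code.

/* Toy model of the mdp5 SMP block lifecycle (illustrative names only). */
#include <stdint.h>
#include <stdio.h>

struct client_smp_state {
	uint32_t inuse;      /* blocks the hardware is fetching from now */
	uint32_t configured; /* blocks written to SMP_ALLOC, pre-vblank  */
	uint32_t pending;    /* blocks requested for the next frame      */
};

/* step 1: request - grow or shrink the pending set */
static void smp_request(struct client_smp_state *ps, uint32_t new_pending,
			uint32_t *global_free)
{
	uint32_t grown   = new_pending & ~ps->pending;
	uint32_t dropped = ps->pending & ~new_pending;

	*global_free &= ~grown;                 /* claim newly requested blocks */
	/* shrinking: blocks never programmed into hw are freed right away;
	 * blocks already in configured are freed later by smp_commit() */
	*global_free |= dropped & ~ps->configured;
	ps->pending = new_pending;
}

/* step 2: configure - program SMP_ALLOC with union(pending, inuse) */
static uint32_t smp_configure(struct client_smp_state *ps)
{
	ps->configured = ps->pending;
	return ps->inuse | ps->configured;      /* value handed to hardware */
}

/* step 3: commit - after vblank, configured becomes inuse */
static void smp_commit(struct client_smp_state *ps, uint32_t *global_free)
{
	uint32_t released = ps->inuse & ~ps->configured;

	*global_free |= released;               /* newly unused blocks */
	ps->inuse = ps->configured;
}

int main(void)
{
	struct client_smp_state ps = { 0, 0, 0 };
	uint32_t free_blocks = 0xff;            /* eight blocks, all free */

	smp_request(&ps, 0x0f, &free_blocks);   /* ask for blocks 0-3 */
	smp_configure(&ps);
	smp_commit(&ps, &free_blocks);
	smp_request(&ps, 0x03, &free_blocks);   /* shrink to blocks 0-1 */
	smp_configure(&ps);
	smp_commit(&ps, &free_blocks);          /* blocks 2-3 return to the pool */
	printf("inuse=0x%x free=0x%x\n", (unsigned)ps.inuse, (unsigned)free_blocks);
	return 0;
}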
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 1b22d8bfe142..1ceb4f22dd89 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -283,12 +283,8 @@ int msm_atomic_commit(struct drm_device *dev,
283 283
284 timeout = ktime_add_ms(ktime_get(), 1000); 284 timeout = ktime_add_ms(ktime_get(), 1000);
285 285
286 ret = msm_wait_fence_interruptable(dev, c->fence, &timeout); 286 /* uninterruptible wait */
287 if (ret) { 287 msm_wait_fence(dev, c->fence, &timeout, false);
288 WARN_ON(ret); // TODO unswap state back? or??
289 commit_destroy(c);
290 return ret;
291 }
292 288
293 complete_commit(c); 289 complete_commit(c);
294 290
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index b7ef56ed8d1c..d3467b115e04 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -637,8 +637,8 @@ static void msm_debugfs_cleanup(struct drm_minor *minor)
637 * Fences: 637 * Fences:
638 */ 638 */
639 639
640int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence, 640int msm_wait_fence(struct drm_device *dev, uint32_t fence,
 641 ktime_t *timeout)                             641 ktime_t *timeout, bool interruptible)
642{ 642{
643 struct msm_drm_private *priv = dev->dev_private; 643 struct msm_drm_private *priv = dev->dev_private;
644 int ret; 644 int ret;
@@ -667,7 +667,12 @@ int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
667 remaining_jiffies = timespec_to_jiffies(&ts); 667 remaining_jiffies = timespec_to_jiffies(&ts);
668 } 668 }
669 669
670 ret = wait_event_interruptible_timeout(priv->fence_event, 670 if (interruptible)
671 ret = wait_event_interruptible_timeout(priv->fence_event,
672 fence_completed(dev, fence),
673 remaining_jiffies);
674 else
675 ret = wait_event_timeout(priv->fence_event,
671 fence_completed(dev, fence), 676 fence_completed(dev, fence),
672 remaining_jiffies); 677 remaining_jiffies);
673 678
@@ -853,7 +858,7 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
853 return -EINVAL; 858 return -EINVAL;
854 } 859 }
855 860
856 return msm_wait_fence_interruptable(dev, args->fence, &timeout); 861 return msm_wait_fence(dev, args->fence, &timeout, true);
857} 862}
858 863
859static const struct drm_ioctl_desc msm_ioctls[] = { 864static const struct drm_ioctl_desc msm_ioctls[] = {
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index e7c5ea125d45..4ff0ec9c994b 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -164,8 +164,8 @@ int msm_atomic_commit(struct drm_device *dev,
164 164
165int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu); 165int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
166 166
167int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence, 167int msm_wait_fence(struct drm_device *dev, uint32_t fence,
168 ktime_t *timeout); 168 ktime_t *timeout, bool interruptible);
169int msm_queue_fence_cb(struct drm_device *dev, 169int msm_queue_fence_cb(struct drm_device *dev,
170 struct msm_fence_cb *cb, uint32_t fence); 170 struct msm_fence_cb *cb, uint32_t fence);
171void msm_update_fence(struct drm_device *dev, uint32_t fence); 171void msm_update_fence(struct drm_device *dev, uint32_t fence);
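
A short caller-side sketch of the renamed fence wait. The wrapper function and the from_ioctl flag are invented for illustration; only the msm_wait_fence() prototype above is taken from the patch.

/* Hypothetical helper showing when each wait flavour would be used. */
#include "msm_drv.h"

static int example_wait_for_fence(struct drm_device *dev, uint32_t fence,
				  bool from_ioctl)
{
	ktime_t timeout = ktime_add_ms(ktime_get(), 1000);

	/*
	 * ioctl paths stay interruptible so userspace signals are honoured;
	 * the atomic commit path above waits uninterruptibly because the
	 * new state has already been swapped in and cannot be unwound.
	 */
	return msm_wait_fence(dev, fence, &timeout, from_ioctl);
}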
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index f211b80e3a1e..c76cc853b08a 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -460,7 +460,7 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
460 if (op & MSM_PREP_NOSYNC) 460 if (op & MSM_PREP_NOSYNC)
461 timeout = NULL; 461 timeout = NULL;
462 462
463 ret = msm_wait_fence_interruptable(dev, fence, timeout); 463 ret = msm_wait_fence(dev, fence, timeout, true);
464 } 464 }
465 465
466 /* TODO cache maintenance */ 466 /* TODO cache maintenance */
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index dd7a7ab603e2..831461bc98a5 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -23,8 +23,12 @@
23struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj) 23struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
24{ 24{
25 struct msm_gem_object *msm_obj = to_msm_bo(obj); 25 struct msm_gem_object *msm_obj = to_msm_bo(obj);
26 BUG_ON(!msm_obj->sgt); /* should have already pinned! */ 26 int npages = obj->size >> PAGE_SHIFT;
27 return msm_obj->sgt; 27
28 if (WARN_ON(!msm_obj->pages)) /* should have already pinned! */
29 return NULL;
30
31 return drm_prime_pages_to_sg(msm_obj->pages, npages);
28} 32}
29 33
30void *msm_gem_prime_vmap(struct drm_gem_object *obj) 34void *msm_gem_prime_vmap(struct drm_gem_object *obj)
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 649024d4daf1..477cbb12809b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -128,6 +128,7 @@ nouveau_cli_destroy(struct nouveau_cli *cli)
128 nvkm_vm_ref(NULL, &nvxx_client(&cli->base)->vm, NULL); 128 nvkm_vm_ref(NULL, &nvxx_client(&cli->base)->vm, NULL);
129 nvif_client_fini(&cli->base); 129 nvif_client_fini(&cli->base);
130 usif_client_fini(cli); 130 usif_client_fini(cli);
131 kfree(cli);
131} 132}
132 133
133static void 134static void
@@ -865,8 +866,10 @@ nouveau_drm_preclose(struct drm_device *dev, struct drm_file *fpriv)
865 866
866 pm_runtime_get_sync(dev->dev); 867 pm_runtime_get_sync(dev->dev);
867 868
869 mutex_lock(&cli->mutex);
868 if (cli->abi16) 870 if (cli->abi16)
869 nouveau_abi16_fini(cli->abi16); 871 nouveau_abi16_fini(cli->abi16);
872 mutex_unlock(&cli->mutex);
870 873
871 mutex_lock(&drm->client.mutex); 874 mutex_lock(&drm->client.mutex);
872 list_del(&cli->head); 875 list_del(&cli->head);
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c
index 775277f1edb0..dcfbbfaf1739 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.c
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.c
@@ -92,6 +92,8 @@ static int nouveau_platform_power_down(struct nouveau_platform_gpu *gpu)
92 return 0; 92 return 0;
93} 93}
94 94
95#if IS_ENABLED(CONFIG_IOMMU_API)
96
95static void nouveau_platform_probe_iommu(struct device *dev, 97static void nouveau_platform_probe_iommu(struct device *dev,
96 struct nouveau_platform_gpu *gpu) 98 struct nouveau_platform_gpu *gpu)
97{ 99{
@@ -158,6 +160,20 @@ static void nouveau_platform_remove_iommu(struct device *dev,
158 } 160 }
159} 161}
160 162
163#else
164
165static void nouveau_platform_probe_iommu(struct device *dev,
166 struct nouveau_platform_gpu *gpu)
167{
168}
169
170static void nouveau_platform_remove_iommu(struct device *dev,
171 struct nouveau_platform_gpu *gpu)
172{
173}
174
175#endif
176
161static int nouveau_platform_probe(struct platform_device *pdev) 177static int nouveau_platform_probe(struct platform_device *pdev)
162{ 178{
163 struct nouveau_platform_gpu *gpu; 179 struct nouveau_platform_gpu *gpu;
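
The nouveau_platform change follows a common kernel idiom: callers stay unconditional and the disabled configuration gets empty stubs. A generic sketch of that shape, with a made-up CONFIG symbol, device type and function names:

/* Generic compile-time stub pattern; CONFIG_EXAMPLE_FEATURE and the
 * example_* names are placeholders, not real kernel symbols.
 */
#include <linux/kconfig.h>

struct example_dev;

#if IS_ENABLED(CONFIG_EXAMPLE_FEATURE)

static void example_feature_probe(struct example_dev *dev)
{
	/* real setup runs when the option is built-in or modular */
}

static void example_feature_remove(struct example_dev *dev)
{
	/* matching teardown */
}

#else /* !CONFIG_EXAMPLE_FEATURE */

static void example_feature_probe(struct example_dev *dev) { }
static void example_feature_remove(struct example_dev *dev) { }

#endif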
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 18f449715788..7464aef34674 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -175,15 +175,24 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
175 node->page_shift = 12; 175 node->page_shift = 12;
176 176
177 switch (drm->device.info.family) { 177 switch (drm->device.info.family) {
178 case NV_DEVICE_INFO_V0_TNT:
179 case NV_DEVICE_INFO_V0_CELSIUS:
180 case NV_DEVICE_INFO_V0_KELVIN:
181 case NV_DEVICE_INFO_V0_RANKINE:
182 case NV_DEVICE_INFO_V0_CURIE:
183 break;
178 case NV_DEVICE_INFO_V0_TESLA: 184 case NV_DEVICE_INFO_V0_TESLA:
179 if (drm->device.info.chipset != 0x50) 185 if (drm->device.info.chipset != 0x50)
180 node->memtype = (nvbo->tile_flags & 0x7f00) >> 8; 186 node->memtype = (nvbo->tile_flags & 0x7f00) >> 8;
181 break; 187 break;
182 case NV_DEVICE_INFO_V0_FERMI: 188 case NV_DEVICE_INFO_V0_FERMI:
183 case NV_DEVICE_INFO_V0_KEPLER: 189 case NV_DEVICE_INFO_V0_KEPLER:
190 case NV_DEVICE_INFO_V0_MAXWELL:
184 node->memtype = (nvbo->tile_flags & 0xff00) >> 8; 191 node->memtype = (nvbo->tile_flags & 0xff00) >> 8;
185 break; 192 break;
186 default: 193 default:
194 NV_WARN(drm, "%s: unhandled family type %x\n", __func__,
195 drm->device.info.family);
187 break; 196 break;
188 } 197 }
189 198
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 4ef602c5469d..495c57644ced 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -203,7 +203,7 @@ nv04_fbcon_accel_init(struct fb_info *info)
203 if (ret) 203 if (ret)
204 return ret; 204 return ret;
205 205
206 if (RING_SPACE(chan, 49)) { 206 if (RING_SPACE(chan, 49 + (device->info.chipset >= 0x11 ? 4 : 0))) {
207 nouveau_fbcon_gpu_lockup(info); 207 nouveau_fbcon_gpu_lockup(info);
208 return 0; 208 return 0;
209 } 209 }
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 7da7958556a3..981342d142ff 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -979,7 +979,7 @@ nv50_crtc_cursor_show_hide(struct nouveau_crtc *nv_crtc, bool show, bool update)
979{ 979{
980 struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev); 980 struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
981 981
982 if (show && nv_crtc->cursor.nvbo) 982 if (show && nv_crtc->cursor.nvbo && nv_crtc->base.enabled)
983 nv50_crtc_cursor_show(nv_crtc); 983 nv50_crtc_cursor_show(nv_crtc);
984 else 984 else
985 nv50_crtc_cursor_hide(nv_crtc); 985 nv50_crtc_cursor_hide(nv_crtc);
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index 394c89abcc97..901130b06072 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -188,7 +188,7 @@ nv50_fbcon_accel_init(struct fb_info *info)
188 if (ret) 188 if (ret)
189 return ret; 189 return ret;
190 190
191 ret = RING_SPACE(chan, 59); 191 ret = RING_SPACE(chan, 58);
192 if (ret) { 192 if (ret) {
193 nouveau_fbcon_gpu_lockup(info); 193 nouveau_fbcon_gpu_lockup(info);
194 return ret; 194 return ret;
@@ -252,6 +252,7 @@ nv50_fbcon_accel_init(struct fb_info *info)
252 OUT_RING(chan, info->var.yres_virtual); 252 OUT_RING(chan, info->var.yres_virtual);
253 OUT_RING(chan, upper_32_bits(fb->vma.offset)); 253 OUT_RING(chan, upper_32_bits(fb->vma.offset));
254 OUT_RING(chan, lower_32_bits(fb->vma.offset)); 254 OUT_RING(chan, lower_32_bits(fb->vma.offset));
255 FIRE_RING(chan);
255 256
256 return 0; 257 return 0;
257} 258}
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
index 61246677e8dc..fcd2e5f27bb9 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
@@ -188,7 +188,7 @@ nvc0_fbcon_accel_init(struct fb_info *info)
188 return -EINVAL; 188 return -EINVAL;
189 } 189 }
190 190
191 ret = RING_SPACE(chan, 60); 191 ret = RING_SPACE(chan, 58);
192 if (ret) { 192 if (ret) {
193 WARN_ON(1); 193 WARN_ON(1);
194 nouveau_fbcon_gpu_lockup(info); 194 nouveau_fbcon_gpu_lockup(info);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c
index 9ef6728c528d..7f2f05f78cc8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c
@@ -809,7 +809,7 @@ exec_lookup(struct nv50_disp_priv *priv, int head, int or, u32 ctrl,
809 case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break; 809 case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break;
810 default: 810 default:
811 nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl); 811 nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl);
812 return 0x0000; 812 return NULL;
813 } 813 }
814 } 814 }
815 815
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 5606c25e5d02..ca11ddb6ed46 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -663,6 +663,37 @@ gf100_gr_zbc_init(struct gf100_gr_priv *priv)
663 gf100_gr_zbc_clear_depth(priv, index); 663 gf100_gr_zbc_clear_depth(priv, index);
664} 664}
665 665
666/**
667 * Wait until GR goes idle. GR is considered idle if it is disabled by the
668 * MC (0x200) register, or GR is not busy and a context switch is not in
669 * progress.
670 */
671int
672gf100_gr_wait_idle(struct gf100_gr_priv *priv)
673{
674 unsigned long end_jiffies = jiffies + msecs_to_jiffies(2000);
675 bool gr_enabled, ctxsw_active, gr_busy;
676
677 do {
678 /*
679 * required to make sure FIFO_ENGINE_STATUS (0x2640) is
680 * up-to-date
681 */
682 nv_rd32(priv, 0x400700);
683
684 gr_enabled = nv_rd32(priv, 0x200) & 0x1000;
685 ctxsw_active = nv_rd32(priv, 0x2640) & 0x8000;
686 gr_busy = nv_rd32(priv, 0x40060c) & 0x1;
687
688 if (!gr_enabled || (!gr_busy && !ctxsw_active))
689 return 0;
690 } while (time_before(jiffies, end_jiffies));
691
692 nv_error(priv, "wait for idle timeout (en: %d, ctxsw: %d, busy: %d)\n",
693 gr_enabled, ctxsw_active, gr_busy);
694 return -EAGAIN;
695}
696
666void 697void
667gf100_gr_mmio(struct gf100_gr_priv *priv, const struct gf100_gr_pack *p) 698gf100_gr_mmio(struct gf100_gr_priv *priv, const struct gf100_gr_pack *p)
668{ 699{
@@ -699,7 +730,13 @@ gf100_gr_icmd(struct gf100_gr_priv *priv, const struct gf100_gr_pack *p)
699 730
700 while (addr < next) { 731 while (addr < next) {
701 nv_wr32(priv, 0x400200, addr); 732 nv_wr32(priv, 0x400200, addr);
 702 nv_wait(priv, 0x400700, 0x00000002, 0x00000000); 733 /*
734 * Wait for GR to go idle after submitting a
735 * GO_IDLE bundle
736 */
737 if ((addr & 0xffff) == 0xe100)
738 gf100_gr_wait_idle(priv);
739 nv_wait(priv, 0x400700, 0x00000004, 0x00000000);
703 addr += init->pitch; 740 addr += init->pitch;
704 } 741 }
705 } 742 }
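
A hypothetical caller of the new gf100_gr_wait_idle() helper; example_reg and the register write are invented, the point is only that callers should honour the -EAGAIN timeout result.

/* Illustrative only: waits for idle GR before touching a register that
 * must not be written while graphics work or a context switch is active. */
static int example_poke_when_idle(struct gf100_gr_priv *priv,
				  u32 example_reg, u32 value)
{
	int ret = gf100_gr_wait_idle(priv);

	if (ret)
		return ret;	/* -EAGAIN: GR never went idle */

	nv_wr32(priv, example_reg, value);
	return 0;
}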
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index 8af1a89eda84..c9533fdac4fc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -181,6 +181,7 @@ struct gf100_gr_oclass {
181 int ppc_nr; 181 int ppc_nr;
182}; 182};
183 183
184int gf100_gr_wait_idle(struct gf100_gr_priv *);
184void gf100_gr_mmio(struct gf100_gr_priv *, const struct gf100_gr_pack *); 185void gf100_gr_mmio(struct gf100_gr_priv *, const struct gf100_gr_pack *);
185void gf100_gr_icmd(struct gf100_gr_priv *, const struct gf100_gr_pack *); 186void gf100_gr_icmd(struct gf100_gr_priv *, const struct gf100_gr_pack *);
186void gf100_gr_mthd(struct gf100_gr_priv *, const struct gf100_gr_pack *); 187void gf100_gr_mthd(struct gf100_gr_priv *, const struct gf100_gr_pack *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
index 2006c445938d..4cf36a3aa814 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
@@ -332,9 +332,12 @@ static void
332nvkm_perfctx_dtor(struct nvkm_object *object) 332nvkm_perfctx_dtor(struct nvkm_object *object)
333{ 333{
334 struct nvkm_pm *ppm = (void *)object->engine; 334 struct nvkm_pm *ppm = (void *)object->engine;
335 struct nvkm_perfctx *ctx = (void *)object;
336
335 mutex_lock(&nv_subdev(ppm)->mutex); 337 mutex_lock(&nv_subdev(ppm)->mutex);
336 nvkm_engctx_destroy(&ppm->context->base); 338 nvkm_engctx_destroy(&ctx->base);
337 ppm->context = NULL; 339 if (ppm->context == ctx)
340 ppm->context = NULL;
338 mutex_unlock(&nv_subdev(ppm)->mutex); 341 mutex_unlock(&nv_subdev(ppm)->mutex);
339} 342}
340 343
@@ -355,12 +358,11 @@ nvkm_perfctx_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
355 mutex_lock(&nv_subdev(ppm)->mutex); 358 mutex_lock(&nv_subdev(ppm)->mutex);
356 if (ppm->context == NULL) 359 if (ppm->context == NULL)
357 ppm->context = ctx; 360 ppm->context = ctx;
358 mutex_unlock(&nv_subdev(ppm)->mutex);
359
360 if (ctx != ppm->context) 361 if (ctx != ppm->context)
361 return -EBUSY; 362 ret = -EBUSY;
363 mutex_unlock(&nv_subdev(ppm)->mutex);
362 364
363 return 0; 365 return ret;
364} 366}
365 367
366struct nvkm_oclass 368struct nvkm_oclass
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
index f67cdae1e90a..f4611e3f0971 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
@@ -1285,6 +1285,44 @@ init_zm_reg_sequence(struct nvbios_init *init)
1285} 1285}
1286 1286
1287/** 1287/**
1288 * INIT_PLL_INDIRECT - opcode 0x59
1289 *
1290 */
1291static void
1292init_pll_indirect(struct nvbios_init *init)
1293{
1294 struct nvkm_bios *bios = init->bios;
1295 u32 reg = nv_ro32(bios, init->offset + 1);
1296 u16 addr = nv_ro16(bios, init->offset + 5);
1297 u32 freq = (u32)nv_ro16(bios, addr) * 1000;
1298
1299 trace("PLL_INDIRECT\tR[0x%06x] =PLL= VBIOS[%04x] = %dkHz\n",
1300 reg, addr, freq);
1301 init->offset += 7;
1302
1303 init_prog_pll(init, reg, freq);
1304}
1305
1306/**
1307 * INIT_ZM_REG_INDIRECT - opcode 0x5a
1308 *
1309 */
1310static void
1311init_zm_reg_indirect(struct nvbios_init *init)
1312{
1313 struct nvkm_bios *bios = init->bios;
1314 u32 reg = nv_ro32(bios, init->offset + 1);
1315 u16 addr = nv_ro16(bios, init->offset + 5);
1316 u32 data = nv_ro32(bios, addr);
1317
1318 trace("ZM_REG_INDIRECT\tR[0x%06x] = VBIOS[0x%04x] = 0x%08x\n",
1319 reg, addr, data);
1320 init->offset += 7;
1321
 1322 init_wr32(init, reg, data);
1323}
1324
1325/**
1288 * INIT_SUB_DIRECT - opcode 0x5b 1326 * INIT_SUB_DIRECT - opcode 0x5b
1289 * 1327 *
1290 */ 1328 */
@@ -2145,6 +2183,8 @@ static struct nvbios_init_opcode {
2145 [0x56] = { init_condition_time }, 2183 [0x56] = { init_condition_time },
2146 [0x57] = { init_ltime }, 2184 [0x57] = { init_ltime },
2147 [0x58] = { init_zm_reg_sequence }, 2185 [0x58] = { init_zm_reg_sequence },
2186 [0x59] = { init_pll_indirect },
2187 [0x5a] = { init_zm_reg_indirect },
2148 [0x5b] = { init_sub_direct }, 2188 [0x5b] = { init_sub_direct },
2149 [0x5c] = { init_jump }, 2189 [0x5c] = { init_jump },
2150 [0x5e] = { init_i2c_if }, 2190 [0x5e] = { init_i2c_if },
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
index 822d32a28d6e..065e9f5c8db9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
@@ -180,7 +180,8 @@ gt215_clk_info(struct nvkm_clk *clock, int clk, u32 khz,
180 struct gt215_clk_info *info) 180 struct gt215_clk_info *info)
181{ 181{
182 struct gt215_clk_priv *priv = (void *)clock; 182 struct gt215_clk_priv *priv = (void *)clock;
183 u32 oclk, sclk, sdiv, diff; 183 u32 oclk, sclk, sdiv;
184 s32 diff;
184 185
185 info->clk = 0; 186 info->clk = 0;
186 187
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
index c0fdb89e74ac..24dcdfb58a8d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
@@ -38,6 +38,14 @@ gk20a_ibus_init_priv_ring(struct gk20a_ibus_priv *priv)
38 nv_wr32(priv, 0x12004c, 0x4); 38 nv_wr32(priv, 0x12004c, 0x4);
39 nv_wr32(priv, 0x122204, 0x2); 39 nv_wr32(priv, 0x122204, 0x2);
40 nv_rd32(priv, 0x122204); 40 nv_rd32(priv, 0x122204);
41
42 /*
43 * Bug: increase clock timeout to avoid operation failure at high
44 * gpcclk rate.
45 */
46 nv_wr32(priv, 0x122354, 0x800);
47 nv_wr32(priv, 0x128328, 0x800);
48 nv_wr32(priv, 0x124320, 0x800);
41} 49}
42 50
43static void 51static void
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
index 80614f1b2074..282143f49d72 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
@@ -50,7 +50,12 @@ nv04_instobj_dtor(struct nvkm_object *object)
50{ 50{
51 struct nv04_instmem_priv *priv = (void *)nvkm_instmem(object); 51 struct nv04_instmem_priv *priv = (void *)nvkm_instmem(object);
52 struct nv04_instobj_priv *node = (void *)object; 52 struct nv04_instobj_priv *node = (void *)object;
53 struct nvkm_subdev *subdev = (void *)priv;
54
55 mutex_lock(&subdev->mutex);
53 nvkm_mm_free(&priv->heap, &node->mem); 56 nvkm_mm_free(&priv->heap, &node->mem);
57 mutex_unlock(&subdev->mutex);
58
54 nvkm_instobj_destroy(&node->base); 59 nvkm_instobj_destroy(&node->base);
55} 60}
56 61
@@ -62,6 +67,7 @@ nv04_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
62 struct nv04_instmem_priv *priv = (void *)nvkm_instmem(parent); 67 struct nv04_instmem_priv *priv = (void *)nvkm_instmem(parent);
63 struct nv04_instobj_priv *node; 68 struct nv04_instobj_priv *node;
64 struct nvkm_instobj_args *args = data; 69 struct nvkm_instobj_args *args = data;
70 struct nvkm_subdev *subdev = (void *)priv;
65 int ret; 71 int ret;
66 72
67 if (!args->align) 73 if (!args->align)
@@ -72,8 +78,10 @@ nv04_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
72 if (ret) 78 if (ret)
73 return ret; 79 return ret;
74 80
81 mutex_lock(&subdev->mutex);
75 ret = nvkm_mm_head(&priv->heap, 0, 1, args->size, args->size, 82 ret = nvkm_mm_head(&priv->heap, 0, 1, args->size, args->size,
76 args->align, &node->mem); 83 args->align, &node->mem);
84 mutex_unlock(&subdev->mutex);
77 if (ret) 85 if (ret)
78 return ret; 86 return ret;
79 87
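
The instmem hunks serialise nvkm_mm_head()/nvkm_mm_free() on the shared heap with the subdev mutex. A minimal sketch of the same shape; struct heap and the heap_alloc()/heap_free() primitives are invented stand-ins for the nvkm allocator.

/* Sketch: every path that touches the shared allocator takes one mutex. */
#include <linux/mutex.h>
#include <linux/types.h>

struct heap {
	struct mutex lock;
	/* allocator bookkeeping lives here */
};

/* assumed underlying allocator, declared only so the sketch is complete */
int heap_alloc(struct heap *h, size_t size, void **out);
void heap_free(struct heap *h, void *node);

static int heap_alloc_locked(struct heap *h, size_t size, void **out)
{
	int ret;

	mutex_lock(&h->lock);
	ret = heap_alloc(h, size, out);
	mutex_unlock(&h->lock);
	return ret;
}

static void heap_free_locked(struct heap *h, void *node)
{
	mutex_lock(&h->lock);
	heap_free(h, node);
	mutex_unlock(&h->lock);
}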
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index dd39f434b4a7..c3872598b85a 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -2299,8 +2299,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
2299 encoder_mode = atombios_get_encoder_mode(encoder); 2299 encoder_mode = atombios_get_encoder_mode(encoder);
2300 if (connector && (radeon_audio != 0) && 2300 if (connector && (radeon_audio != 0) &&
2301 ((encoder_mode == ATOM_ENCODER_MODE_HDMI) || 2301 ((encoder_mode == ATOM_ENCODER_MODE_HDMI) ||
2302 (ENCODER_MODE_IS_DP(encoder_mode) && 2302 ENCODER_MODE_IS_DP(encoder_mode)))
2303 drm_detect_monitor_audio(radeon_connector_edid(connector)))))
2304 radeon_audio_mode_set(encoder, adjusted_mode); 2303 radeon_audio_mode_set(encoder, adjusted_mode);
2305} 2304}
2306 2305
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 8730562323a8..4a09947be244 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -5818,7 +5818,7 @@ int ci_dpm_init(struct radeon_device *rdev)
5818 tmp |= DPM_ENABLED; 5818 tmp |= DPM_ENABLED;
5819 break; 5819 break;
5820 default: 5820 default:
5821 DRM_ERROR("Invalid PCC GPIO: %u!\n", gpio.shift); 5821 DRM_DEBUG("Invalid PCC GPIO: %u!\n", gpio.shift);
5822 break; 5822 break;
5823 } 5823 }
5824 WREG32_SMC(CNB_PWRMGT_CNTL, tmp); 5824 WREG32_SMC(CNB_PWRMGT_CNTL, tmp);
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 68fd9fc677e3..44480c1b9738 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -93,30 +93,26 @@ void dce6_afmt_select_pin(struct drm_encoder *encoder)
93 struct radeon_device *rdev = encoder->dev->dev_private; 93 struct radeon_device *rdev = encoder->dev->dev_private;
94 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 94 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
95 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 95 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
96 u32 offset;
97 96
98 if (!dig || !dig->afmt || !dig->afmt->pin) 97 if (!dig || !dig->afmt || !dig->pin)
99 return; 98 return;
100 99
101 offset = dig->afmt->offset; 100 WREG32(AFMT_AUDIO_SRC_CONTROL + dig->afmt->offset,
102 101 AFMT_AUDIO_SRC_SELECT(dig->pin->id));
103 WREG32(AFMT_AUDIO_SRC_CONTROL + offset,
104 AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id));
105} 102}
106 103
107void dce6_afmt_write_latency_fields(struct drm_encoder *encoder, 104void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
108 struct drm_connector *connector, struct drm_display_mode *mode) 105 struct drm_connector *connector,
106 struct drm_display_mode *mode)
109{ 107{
110 struct radeon_device *rdev = encoder->dev->dev_private; 108 struct radeon_device *rdev = encoder->dev->dev_private;
111 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 109 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
112 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 110 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
113 u32 tmp = 0, offset; 111 u32 tmp = 0;
114 112
115 if (!dig || !dig->afmt || !dig->afmt->pin) 113 if (!dig || !dig->afmt || !dig->pin)
116 return; 114 return;
117 115
118 offset = dig->afmt->pin->offset;
119
120 if (mode->flags & DRM_MODE_FLAG_INTERLACE) { 116 if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
121 if (connector->latency_present[1]) 117 if (connector->latency_present[1])
122 tmp = VIDEO_LIPSYNC(connector->video_latency[1]) | 118 tmp = VIDEO_LIPSYNC(connector->video_latency[1]) |
@@ -130,24 +126,24 @@ void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
130 else 126 else
131 tmp = VIDEO_LIPSYNC(0) | AUDIO_LIPSYNC(0); 127 tmp = VIDEO_LIPSYNC(0) | AUDIO_LIPSYNC(0);
132 } 128 }
133 WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp); 129 WREG32_ENDPOINT(dig->pin->offset,
130 AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
134} 131}
135 132
136void dce6_afmt_hdmi_write_speaker_allocation(struct drm_encoder *encoder, 133void dce6_afmt_hdmi_write_speaker_allocation(struct drm_encoder *encoder,
137 u8 *sadb, int sad_count) 134 u8 *sadb, int sad_count)
138{ 135{
139 struct radeon_device *rdev = encoder->dev->dev_private; 136 struct radeon_device *rdev = encoder->dev->dev_private;
140 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 137 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
141 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 138 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
142 u32 offset, tmp; 139 u32 tmp;
143 140
144 if (!dig || !dig->afmt || !dig->afmt->pin) 141 if (!dig || !dig->afmt || !dig->pin)
145 return; 142 return;
146 143
147 offset = dig->afmt->pin->offset;
148
149 /* program the speaker allocation */ 144 /* program the speaker allocation */
150 tmp = RREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER); 145 tmp = RREG32_ENDPOINT(dig->pin->offset,
146 AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
151 tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK); 147 tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK);
152 /* set HDMI mode */ 148 /* set HDMI mode */
153 tmp |= HDMI_CONNECTION; 149 tmp |= HDMI_CONNECTION;
@@ -155,24 +151,24 @@ void dce6_afmt_hdmi_write_speaker_allocation(struct drm_encoder *encoder,
155 tmp |= SPEAKER_ALLOCATION(sadb[0]); 151 tmp |= SPEAKER_ALLOCATION(sadb[0]);
156 else 152 else
157 tmp |= SPEAKER_ALLOCATION(5); /* stereo */ 153 tmp |= SPEAKER_ALLOCATION(5); /* stereo */
158 WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp); 154 WREG32_ENDPOINT(dig->pin->offset,
155 AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
159} 156}
160 157
161void dce6_afmt_dp_write_speaker_allocation(struct drm_encoder *encoder, 158void dce6_afmt_dp_write_speaker_allocation(struct drm_encoder *encoder,
162 u8 *sadb, int sad_count) 159 u8 *sadb, int sad_count)
163{ 160{
164 struct radeon_device *rdev = encoder->dev->dev_private; 161 struct radeon_device *rdev = encoder->dev->dev_private;
165 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 162 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
166 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 163 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
167 u32 offset, tmp; 164 u32 tmp;
168 165
169 if (!dig || !dig->afmt || !dig->afmt->pin) 166 if (!dig || !dig->afmt || !dig->pin)
170 return; 167 return;
171 168
172 offset = dig->afmt->pin->offset;
173
174 /* program the speaker allocation */ 169 /* program the speaker allocation */
175 tmp = RREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER); 170 tmp = RREG32_ENDPOINT(dig->pin->offset,
171 AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
176 tmp &= ~(HDMI_CONNECTION | SPEAKER_ALLOCATION_MASK); 172 tmp &= ~(HDMI_CONNECTION | SPEAKER_ALLOCATION_MASK);
177 /* set DP mode */ 173 /* set DP mode */
178 tmp |= DP_CONNECTION; 174 tmp |= DP_CONNECTION;
@@ -180,13 +176,13 @@ void dce6_afmt_dp_write_speaker_allocation(struct drm_encoder *encoder,
180 tmp |= SPEAKER_ALLOCATION(sadb[0]); 176 tmp |= SPEAKER_ALLOCATION(sadb[0]);
181 else 177 else
182 tmp |= SPEAKER_ALLOCATION(5); /* stereo */ 178 tmp |= SPEAKER_ALLOCATION(5); /* stereo */
183 WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp); 179 WREG32_ENDPOINT(dig->pin->offset,
180 AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
184} 181}
185 182
186void dce6_afmt_write_sad_regs(struct drm_encoder *encoder, 183void dce6_afmt_write_sad_regs(struct drm_encoder *encoder,
187 struct cea_sad *sads, int sad_count) 184 struct cea_sad *sads, int sad_count)
188{ 185{
189 u32 offset;
190 int i; 186 int i;
191 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 187 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
192 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 188 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
@@ -206,11 +202,9 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder,
206 { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO }, 202 { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
207 }; 203 };
208 204
209 if (!dig || !dig->afmt || !dig->afmt->pin) 205 if (!dig || !dig->afmt || !dig->pin)
210 return; 206 return;
211 207
212 offset = dig->afmt->pin->offset;
213
214 for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) { 208 for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
215 u32 value = 0; 209 u32 value = 0;
216 u8 stereo_freqs = 0; 210 u8 stereo_freqs = 0;
@@ -237,7 +231,7 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder,
237 231
238 value |= SUPPORTED_FREQUENCIES_STEREO(stereo_freqs); 232 value |= SUPPORTED_FREQUENCIES_STEREO(stereo_freqs);
239 233
240 WREG32_ENDPOINT(offset, eld_reg_to_type[i][0], value); 234 WREG32_ENDPOINT(dig->pin->offset, eld_reg_to_type[i][0], value);
241 } 235 }
242} 236}
243 237
@@ -253,7 +247,7 @@ void dce6_audio_enable(struct radeon_device *rdev,
253} 247}
254 248
255void dce6_hdmi_audio_set_dto(struct radeon_device *rdev, 249void dce6_hdmi_audio_set_dto(struct radeon_device *rdev,
256 struct radeon_crtc *crtc, unsigned int clock) 250 struct radeon_crtc *crtc, unsigned int clock)
257{ 251{
258 /* Two dtos; generally use dto0 for HDMI */ 252 /* Two dtos; generally use dto0 for HDMI */
259 u32 value = 0; 253 u32 value = 0;
@@ -272,7 +266,7 @@ void dce6_hdmi_audio_set_dto(struct radeon_device *rdev,
272} 266}
273 267
274void dce6_dp_audio_set_dto(struct radeon_device *rdev, 268void dce6_dp_audio_set_dto(struct radeon_device *rdev,
275 struct radeon_crtc *crtc, unsigned int clock) 269 struct radeon_crtc *crtc, unsigned int clock)
276{ 270{
277 /* Two dtos; generally use dto1 for DP */ 271 /* Two dtos; generally use dto1 for DP */
278 u32 value = 0; 272 u32 value = 0;
diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
index fa719c53449b..fbc8d88d6e5d 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.c
+++ b/drivers/gpu/drm/radeon/radeon_audio.c
@@ -245,6 +245,28 @@ static struct radeon_audio_funcs dce6_dp_funcs = {
245static void radeon_audio_enable(struct radeon_device *rdev, 245static void radeon_audio_enable(struct radeon_device *rdev,
246 struct r600_audio_pin *pin, u8 enable_mask) 246 struct r600_audio_pin *pin, u8 enable_mask)
247{ 247{
248 struct drm_encoder *encoder;
249 struct radeon_encoder *radeon_encoder;
250 struct radeon_encoder_atom_dig *dig;
251 int pin_count = 0;
252
253 if (!pin)
254 return;
255
256 if (rdev->mode_info.mode_config_initialized) {
257 list_for_each_entry(encoder, &rdev->ddev->mode_config.encoder_list, head) {
258 if (radeon_encoder_is_digital(encoder)) {
259 radeon_encoder = to_radeon_encoder(encoder);
260 dig = radeon_encoder->enc_priv;
261 if (dig->pin == pin)
262 pin_count++;
263 }
264 }
265
266 if ((pin_count > 1) && (enable_mask == 0))
267 return;
268 }
269
248 if (rdev->audio.funcs->enable) 270 if (rdev->audio.funcs->enable)
249 rdev->audio.funcs->enable(rdev, pin, enable_mask); 271 rdev->audio.funcs->enable(rdev, pin, enable_mask);
250} 272}
@@ -336,24 +358,13 @@ void radeon_audio_endpoint_wreg(struct radeon_device *rdev, u32 offset,
336 358
337static void radeon_audio_write_sad_regs(struct drm_encoder *encoder) 359static void radeon_audio_write_sad_regs(struct drm_encoder *encoder)
338{ 360{
339 struct radeon_encoder *radeon_encoder; 361 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
340 struct drm_connector *connector; 362 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
341 struct radeon_connector *radeon_connector = NULL;
342 struct cea_sad *sads; 363 struct cea_sad *sads;
343 int sad_count; 364 int sad_count;
344 365
345 list_for_each_entry(connector, 366 if (!connector)
346 &encoder->dev->mode_config.connector_list, head) {
347 if (connector->encoder == encoder) {
348 radeon_connector = to_radeon_connector(connector);
349 break;
350 }
351 }
352
353 if (!radeon_connector) {
354 DRM_ERROR("Couldn't find encoder's connector\n");
355 return; 367 return;
356 }
357 368
358 sad_count = drm_edid_to_sad(radeon_connector_edid(connector), &sads); 369 sad_count = drm_edid_to_sad(radeon_connector_edid(connector), &sads);
359 if (sad_count <= 0) { 370 if (sad_count <= 0) {
@@ -362,8 +373,6 @@ static void radeon_audio_write_sad_regs(struct drm_encoder *encoder)
362 } 373 }
363 BUG_ON(!sads); 374 BUG_ON(!sads);
364 375
365 radeon_encoder = to_radeon_encoder(encoder);
366
367 if (radeon_encoder->audio && radeon_encoder->audio->write_sad_regs) 376 if (radeon_encoder->audio && radeon_encoder->audio->write_sad_regs)
368 radeon_encoder->audio->write_sad_regs(encoder, sads, sad_count); 377 radeon_encoder->audio->write_sad_regs(encoder, sads, sad_count);
369 378
@@ -372,27 +381,16 @@ static void radeon_audio_write_sad_regs(struct drm_encoder *encoder)
372 381
373static void radeon_audio_write_speaker_allocation(struct drm_encoder *encoder) 382static void radeon_audio_write_speaker_allocation(struct drm_encoder *encoder)
374{ 383{
384 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
375 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 385 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
376 struct drm_connector *connector;
377 struct radeon_connector *radeon_connector = NULL;
378 u8 *sadb = NULL; 386 u8 *sadb = NULL;
379 int sad_count; 387 int sad_count;
380 388
381 list_for_each_entry(connector, 389 if (!connector)
382 &encoder->dev->mode_config.connector_list, head) {
383 if (connector->encoder == encoder) {
384 radeon_connector = to_radeon_connector(connector);
385 break;
386 }
387 }
388
389 if (!radeon_connector) {
390 DRM_ERROR("Couldn't find encoder's connector\n");
391 return; 390 return;
392 }
393 391
394 sad_count = drm_edid_to_speaker_allocation( 392 sad_count = drm_edid_to_speaker_allocation(radeon_connector_edid(connector),
395 radeon_connector_edid(connector), &sadb); 393 &sadb);
396 if (sad_count < 0) { 394 if (sad_count < 0) {
397 DRM_DEBUG("Couldn't read Speaker Allocation Data Block: %d\n", 395 DRM_DEBUG("Couldn't read Speaker Allocation Data Block: %d\n",
398 sad_count); 396 sad_count);
@@ -406,26 +404,13 @@ static void radeon_audio_write_speaker_allocation(struct drm_encoder *encoder)
406} 404}
407 405
408static void radeon_audio_write_latency_fields(struct drm_encoder *encoder, 406static void radeon_audio_write_latency_fields(struct drm_encoder *encoder,
409 struct drm_display_mode *mode) 407 struct drm_display_mode *mode)
410{ 408{
411 struct radeon_encoder *radeon_encoder; 409 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
412 struct drm_connector *connector; 410 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
413 struct radeon_connector *radeon_connector = 0;
414
415 list_for_each_entry(connector,
416 &encoder->dev->mode_config.connector_list, head) {
417 if (connector->encoder == encoder) {
418 radeon_connector = to_radeon_connector(connector);
419 break;
420 }
421 }
422 411
423 if (!radeon_connector) { 412 if (!connector)
424 DRM_ERROR("Couldn't find encoder's connector\n");
425 return; 413 return;
426 }
427
428 radeon_encoder = to_radeon_encoder(encoder);
429 414
430 if (radeon_encoder->audio && radeon_encoder->audio->write_latency_fields) 415 if (radeon_encoder->audio && radeon_encoder->audio->write_latency_fields)
431 radeon_encoder->audio->write_latency_fields(encoder, connector, mode); 416 radeon_encoder->audio->write_latency_fields(encoder, connector, mode);
@@ -451,29 +436,23 @@ static void radeon_audio_select_pin(struct drm_encoder *encoder)
451} 436}
452 437
453void radeon_audio_detect(struct drm_connector *connector, 438void radeon_audio_detect(struct drm_connector *connector,
439 struct drm_encoder *encoder,
454 enum drm_connector_status status) 440 enum drm_connector_status status)
455{ 441{
456 struct radeon_device *rdev; 442 struct drm_device *dev = connector->dev;
457 struct radeon_encoder *radeon_encoder; 443 struct radeon_device *rdev = dev->dev_private;
444 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
458 struct radeon_encoder_atom_dig *dig; 445 struct radeon_encoder_atom_dig *dig;
459 446
460 if (!connector || !connector->encoder) 447 if (!radeon_audio_chipset_supported(rdev))
461 return; 448 return;
462 449
463 rdev = connector->encoder->dev->dev_private; 450 if (!radeon_encoder_is_digital(encoder))
464
465 if (!radeon_audio_chipset_supported(rdev))
466 return; 451 return;
467 452
468 radeon_encoder = to_radeon_encoder(connector->encoder);
469 dig = radeon_encoder->enc_priv; 453 dig = radeon_encoder->enc_priv;
470 454
471 if (status == connector_status_connected) { 455 if (status == connector_status_connected) {
472 if (!drm_detect_monitor_audio(radeon_connector_edid(connector))) {
473 radeon_encoder->audio = NULL;
474 return;
475 }
476
477 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) { 456 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
478 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 457 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
479 458
@@ -486,11 +465,17 @@ void radeon_audio_detect(struct drm_connector *connector,
486 radeon_encoder->audio = rdev->audio.hdmi_funcs; 465 radeon_encoder->audio = rdev->audio.hdmi_funcs;
487 } 466 }
488 467
489 dig->afmt->pin = radeon_audio_get_pin(connector->encoder); 468 if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
490 radeon_audio_enable(rdev, dig->afmt->pin, 0xf); 469 if (!dig->pin)
470 dig->pin = radeon_audio_get_pin(encoder);
471 radeon_audio_enable(rdev, dig->pin, 0xf);
472 } else {
473 radeon_audio_enable(rdev, dig->pin, 0);
474 dig->pin = NULL;
475 }
491 } else { 476 } else {
492 radeon_audio_enable(rdev, dig->afmt->pin, 0); 477 radeon_audio_enable(rdev, dig->pin, 0);
493 dig->afmt->pin = NULL; 478 dig->pin = NULL;
494 } 479 }
495} 480}
496 481
@@ -518,29 +503,18 @@ static void radeon_audio_set_dto(struct drm_encoder *encoder, unsigned int clock
518} 503}
519 504
520static int radeon_audio_set_avi_packet(struct drm_encoder *encoder, 505static int radeon_audio_set_avi_packet(struct drm_encoder *encoder,
521 struct drm_display_mode *mode) 506 struct drm_display_mode *mode)
522{ 507{
523 struct radeon_device *rdev = encoder->dev->dev_private; 508 struct radeon_device *rdev = encoder->dev->dev_private;
524 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 509 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
525 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 510 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
526 struct drm_connector *connector; 511 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
527 struct radeon_connector *radeon_connector = NULL;
528 u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE]; 512 u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
529 struct hdmi_avi_infoframe frame; 513 struct hdmi_avi_infoframe frame;
530 int err; 514 int err;
531 515
532 list_for_each_entry(connector, 516 if (!connector)
533 &encoder->dev->mode_config.connector_list, head) { 517 return -EINVAL;
534 if (connector->encoder == encoder) {
535 radeon_connector = to_radeon_connector(connector);
536 break;
537 }
538 }
539
540 if (!radeon_connector) {
541 DRM_ERROR("Couldn't find encoder's connector\n");
542 return -ENOENT;
543 }
544 518
545 err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode); 519 err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
546 if (err < 0) { 520 if (err < 0) {
@@ -563,8 +537,8 @@ static int radeon_audio_set_avi_packet(struct drm_encoder *encoder,
563 return err; 537 return err;
564 } 538 }
565 539
566 if (dig && dig->afmt && 540 if (dig && dig->afmt && radeon_encoder->audio &&
567 radeon_encoder->audio && radeon_encoder->audio->set_avi_packet) 541 radeon_encoder->audio->set_avi_packet)
568 radeon_encoder->audio->set_avi_packet(rdev, dig->afmt->offset, 542 radeon_encoder->audio->set_avi_packet(rdev, dig->afmt->offset,
569 buffer, sizeof(buffer)); 543 buffer, sizeof(buffer));
570 544
@@ -722,30 +696,41 @@ static void radeon_audio_hdmi_mode_set(struct drm_encoder *encoder,
722{ 696{
723 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 697 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
724 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 698 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
699 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
725 700
726 if (!dig || !dig->afmt) 701 if (!dig || !dig->afmt)
727 return; 702 return;
728 703
729 radeon_audio_set_mute(encoder, true); 704 if (!connector)
705 return;
730 706
731 radeon_audio_write_speaker_allocation(encoder); 707 if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
732 radeon_audio_write_sad_regs(encoder); 708 radeon_audio_set_mute(encoder, true);
733 radeon_audio_write_latency_fields(encoder, mode);
734 radeon_audio_set_dto(encoder, mode->clock);
735 radeon_audio_set_vbi_packet(encoder);
736 radeon_hdmi_set_color_depth(encoder);
737 radeon_audio_update_acr(encoder, mode->clock);
738 radeon_audio_set_audio_packet(encoder);
739 radeon_audio_select_pin(encoder);
740 709
741 if (radeon_audio_set_avi_packet(encoder, mode) < 0) 710 radeon_audio_write_speaker_allocation(encoder);
742 return; 711 radeon_audio_write_sad_regs(encoder);
712 radeon_audio_write_latency_fields(encoder, mode);
713 radeon_audio_set_dto(encoder, mode->clock);
714 radeon_audio_set_vbi_packet(encoder);
715 radeon_hdmi_set_color_depth(encoder);
716 radeon_audio_update_acr(encoder, mode->clock);
717 radeon_audio_set_audio_packet(encoder);
718 radeon_audio_select_pin(encoder);
719
720 if (radeon_audio_set_avi_packet(encoder, mode) < 0)
721 return;
743 722
744 radeon_audio_set_mute(encoder, false); 723 radeon_audio_set_mute(encoder, false);
724 } else {
725 radeon_hdmi_set_color_depth(encoder);
726
727 if (radeon_audio_set_avi_packet(encoder, mode) < 0)
728 return;
729 }
745} 730}
746 731
747static void radeon_audio_dp_mode_set(struct drm_encoder *encoder, 732static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
748 struct drm_display_mode *mode) 733 struct drm_display_mode *mode)
749{ 734{
750 struct drm_device *dev = encoder->dev; 735 struct drm_device *dev = encoder->dev;
751 struct radeon_device *rdev = dev->dev_private; 736 struct radeon_device *rdev = dev->dev_private;
@@ -759,22 +744,27 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
759 if (!dig || !dig->afmt) 744 if (!dig || !dig->afmt)
760 return; 745 return;
761 746
762 radeon_audio_write_speaker_allocation(encoder); 747 if (!connector)
763 radeon_audio_write_sad_regs(encoder);
764 radeon_audio_write_latency_fields(encoder, mode);
765 if (rdev->clock.dp_extclk || ASIC_IS_DCE5(rdev))
766 radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10);
767 else
768 radeon_audio_set_dto(encoder, dig_connector->dp_clock);
769 radeon_audio_set_audio_packet(encoder);
770 radeon_audio_select_pin(encoder);
771
772 if (radeon_audio_set_avi_packet(encoder, mode) < 0)
773 return; 748 return;
749
750 if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
751 radeon_audio_write_speaker_allocation(encoder);
752 radeon_audio_write_sad_regs(encoder);
753 radeon_audio_write_latency_fields(encoder, mode);
754 if (rdev->clock.dp_extclk || ASIC_IS_DCE5(rdev))
755 radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10);
756 else
757 radeon_audio_set_dto(encoder, dig_connector->dp_clock);
758 radeon_audio_set_audio_packet(encoder);
759 radeon_audio_select_pin(encoder);
760
761 if (radeon_audio_set_avi_packet(encoder, mode) < 0)
762 return;
763 }
774} 764}
775 765
776void radeon_audio_mode_set(struct drm_encoder *encoder, 766void radeon_audio_mode_set(struct drm_encoder *encoder,
777 struct drm_display_mode *mode) 767 struct drm_display_mode *mode)
778{ 768{
779 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 769 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
780 770
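
The radeon_audio_enable() hunk earlier in this file refuses to power a pin down while another digital encoder still points at it. A small standalone model of that last-user check; the structures and the flat encoder array replace the driver's encoder list walk and are purely illustrative.

/* Model of the shared-pin disable check added to radeon_audio_enable(). */
#include <stdbool.h>
#include <stddef.h>

struct pin {
	int id;
};

struct encoder {
	const struct pin *pin;	/* NULL when no audio pin is assigned */
};

/* Safe to disable @pin only when at most one encoder still references it. */
static bool pin_can_be_disabled(const struct pin *pin,
				const struct encoder *enc, size_t nenc)
{
	size_t i, users = 0;

	for (i = 0; i < nenc; i++)
		if (enc[i].pin == pin)
			users++;

	return users <= 1;
}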
diff --git a/drivers/gpu/drm/radeon/radeon_audio.h b/drivers/gpu/drm/radeon/radeon_audio.h
index 8438304f7139..059cc3012062 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.h
+++ b/drivers/gpu/drm/radeon/radeon_audio.h
@@ -68,7 +68,8 @@ struct radeon_audio_funcs
68 68
69int radeon_audio_init(struct radeon_device *rdev); 69int radeon_audio_init(struct radeon_device *rdev);
70void radeon_audio_detect(struct drm_connector *connector, 70void radeon_audio_detect(struct drm_connector *connector,
71 enum drm_connector_status status); 71 struct drm_encoder *encoder,
72 enum drm_connector_status status);
72u32 radeon_audio_endpoint_rreg(struct radeon_device *rdev, 73u32 radeon_audio_endpoint_rreg(struct radeon_device *rdev,
73 u32 offset, u32 reg); 74 u32 offset, u32 reg);
74void radeon_audio_endpoint_wreg(struct radeon_device *rdev, 75void radeon_audio_endpoint_wreg(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 3e5f6b71f3ad..c097d3a82bda 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -1255,10 +1255,15 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
1255 1255
1256 if ((RBIOS16(tmp) == lvds->native_mode.hdisplay) && 1256 if ((RBIOS16(tmp) == lvds->native_mode.hdisplay) &&
1257 (RBIOS16(tmp + 2) == lvds->native_mode.vdisplay)) { 1257 (RBIOS16(tmp + 2) == lvds->native_mode.vdisplay)) {
1258 u32 hss = (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8;
1259
1260 if (hss > lvds->native_mode.hdisplay)
1261 hss = (10 - 1) * 8;
1262
1258 lvds->native_mode.htotal = lvds->native_mode.hdisplay + 1263 lvds->native_mode.htotal = lvds->native_mode.hdisplay +
1259 (RBIOS16(tmp + 17) - RBIOS16(tmp + 19)) * 8; 1264 (RBIOS16(tmp + 17) - RBIOS16(tmp + 19)) * 8;
1260 lvds->native_mode.hsync_start = lvds->native_mode.hdisplay + 1265 lvds->native_mode.hsync_start = lvds->native_mode.hdisplay +
1261 (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8; 1266 hss;
1262 lvds->native_mode.hsync_end = lvds->native_mode.hsync_start + 1267 lvds->native_mode.hsync_end = lvds->native_mode.hsync_start +
1263 (RBIOS8(tmp + 23) * 8); 1268 (RBIOS8(tmp + 23) * 8);
1264 1269
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index cebb65e07e1d..94b21ae70ef7 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -1379,8 +1379,16 @@ out:
1379 /* updated in get modes as well since we need to know if it's analog or digital */ 1379 /* updated in get modes as well since we need to know if it's analog or digital */
1380 radeon_connector_update_scratch_regs(connector, ret); 1380 radeon_connector_update_scratch_regs(connector, ret);
1381 1381
1382 if (radeon_audio != 0) 1382 if ((radeon_audio != 0) && radeon_connector->use_digital) {
1383 radeon_audio_detect(connector, ret); 1383 const struct drm_connector_helper_funcs *connector_funcs =
1384 connector->helper_private;
1385
1386 encoder = connector_funcs->best_encoder(connector);
1387 if (encoder && (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)) {
1388 radeon_connector_get_edid(connector);
1389 radeon_audio_detect(connector, encoder, ret);
1390 }
1391 }
1384 1392
1385exit: 1393exit:
1386 pm_runtime_mark_last_busy(connector->dev->dev); 1394 pm_runtime_mark_last_busy(connector->dev->dev);
@@ -1717,8 +1725,10 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
1717 1725
1718 radeon_connector_update_scratch_regs(connector, ret); 1726 radeon_connector_update_scratch_regs(connector, ret);
1719 1727
1720 if (radeon_audio != 0) 1728 if ((radeon_audio != 0) && encoder) {
1721 radeon_audio_detect(connector, ret); 1729 radeon_connector_get_edid(connector);
1730 radeon_audio_detect(connector, encoder, ret);
1731 }
1722 1732
1723out: 1733out:
1724 pm_runtime_mark_last_busy(connector->dev->dev); 1734 pm_runtime_mark_last_busy(connector->dev->dev);
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 5450fa95a47e..c4777c8d0312 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -260,8 +260,10 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
260 } 260 }
261 } 261 }
262 } 262 }
263 mb(); 263 if (rdev->gart.ptr) {
264 radeon_gart_tlb_flush(rdev); 264 mb();
265 radeon_gart_tlb_flush(rdev);
266 }
265} 267}
266 268
267/** 269/**
@@ -306,8 +308,10 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
306 page_base += RADEON_GPU_PAGE_SIZE; 308 page_base += RADEON_GPU_PAGE_SIZE;
307 } 309 }
308 } 310 }
309 mb(); 311 if (rdev->gart.ptr) {
310 radeon_gart_tlb_flush(rdev); 312 mb();
313 radeon_gart_tlb_flush(rdev);
314 }
311 return 0; 315 return 0;
312} 316}
313 317
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 013ec7106e55..3dcc5733ff69 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -36,6 +36,7 @@ void radeon_gem_object_free(struct drm_gem_object *gobj)
36 if (robj) { 36 if (robj) {
37 if (robj->gem_base.import_attach) 37 if (robj->gem_base.import_attach)
38 drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg); 38 drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
39 radeon_mn_unregister(robj);
39 radeon_bo_unref(&robj); 40 radeon_bo_unref(&robj);
40 } 41 }
41} 42}
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 1162bfa464f3..171d3e43c30c 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -79,6 +79,11 @@ static void radeon_hotplug_work_func(struct work_struct *work)
79 struct drm_mode_config *mode_config = &dev->mode_config; 79 struct drm_mode_config *mode_config = &dev->mode_config;
80 struct drm_connector *connector; 80 struct drm_connector *connector;
81 81
82 /* we can race here at startup, some boards seem to trigger
83 * hotplug irqs when they shouldn't. */
84 if (!rdev->mode_info.mode_config_initialized)
85 return;
86
82 mutex_lock(&mode_config->mutex); 87 mutex_lock(&mode_config->mutex);
83 if (mode_config->num_connector) { 88 if (mode_config->num_connector) {
84 list_for_each_entry(connector, &mode_config->connector_list, head) 89 list_for_each_entry(connector, &mode_config->connector_list, head)
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 07909d817381..aecc3e3dec0c 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -237,7 +237,6 @@ struct radeon_afmt {
237 int offset; 237 int offset;
238 bool last_buffer_filled_status; 238 bool last_buffer_filled_status;
239 int id; 239 int id;
240 struct r600_audio_pin *pin;
241}; 240};
242 241
243struct radeon_mode_info { 242struct radeon_mode_info {
@@ -439,6 +438,7 @@ struct radeon_encoder_atom_dig {
439 uint8_t backlight_level; 438 uint8_t backlight_level;
440 int panel_mode; 439 int panel_mode;
441 struct radeon_afmt *afmt; 440 struct radeon_afmt *afmt;
441 struct r600_audio_pin *pin;
442 int active_mst_links; 442 int active_mst_links;
443}; 443};
444 444
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 318165d4855c..676362769b8d 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -75,7 +75,6 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
75 bo = container_of(tbo, struct radeon_bo, tbo); 75 bo = container_of(tbo, struct radeon_bo, tbo);
76 76
77 radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1); 77 radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);
78 radeon_mn_unregister(bo);
79 78
80 mutex_lock(&bo->rdev->gem.mutex); 79 mutex_lock(&bo->rdev->gem.mutex);
81 list_del_init(&bo->list); 80 list_del_init(&bo->list);
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 1dbdf3230dae..787cd8fd897f 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2926,6 +2926,7 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
2926 /* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */ 2926 /* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
2927 { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 }, 2927 { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
2928 { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 }, 2928 { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
2929 { PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
2929 { 0, 0, 0, 0 }, 2930 { 0, 0, 0, 0 },
2930}; 2931};
2931 2932
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index 01b558fe3695..9a0c2911272a 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -555,7 +555,6 @@ static struct platform_driver rockchip_drm_platform_driver = {
555 .probe = rockchip_drm_platform_probe, 555 .probe = rockchip_drm_platform_probe,
556 .remove = rockchip_drm_platform_remove, 556 .remove = rockchip_drm_platform_remove,
557 .driver = { 557 .driver = {
558 .owner = THIS_MODULE,
559 .name = "rockchip-drm", 558 .name = "rockchip-drm",
560 .of_match_table = rockchip_drm_dt_ids, 559 .of_match_table = rockchip_drm_dt_ids,
561 .pm = &rockchip_drm_pm_ops, 560 .pm = &rockchip_drm_pm_ops,
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
index 77d52893d40f..002645bb5bbf 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
@@ -162,7 +162,8 @@ static void rockchip_drm_output_poll_changed(struct drm_device *dev)
162 struct rockchip_drm_private *private = dev->dev_private; 162 struct rockchip_drm_private *private = dev->dev_private;
163 struct drm_fb_helper *fb_helper = &private->fbdev_helper; 163 struct drm_fb_helper *fb_helper = &private->fbdev_helper;
164 164
165 drm_fb_helper_hotplug_event(fb_helper); 165 if (fb_helper)
166 drm_fb_helper_hotplug_event(fb_helper);
166} 167}
167 168
168static const struct drm_mode_config_funcs rockchip_drm_mode_config_funcs = { 169static const struct drm_mode_config_funcs rockchip_drm_mode_config_funcs = {
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index eb2282cc4a56..eba5f8a52fbd 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -54,55 +54,56 @@ static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj)
54 &rk_obj->dma_attrs); 54 &rk_obj->dma_attrs);
55} 55}
56 56
57int rockchip_gem_mmap_buf(struct drm_gem_object *obj, 57static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
58 struct vm_area_struct *vma) 58 struct vm_area_struct *vma)
59
59{ 60{
61 int ret;
60 struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj); 62 struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
61 struct drm_device *drm = obj->dev; 63 struct drm_device *drm = obj->dev;
62 unsigned long vm_size;
63 64
64 vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; 65 /*
65 vm_size = vma->vm_end - vma->vm_start; 66 * dma_alloc_attrs() allocated a struct page table for rk_obj, so clear
66 67 * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
67 if (vm_size > obj->size) 68 */
68 return -EINVAL; 69 vma->vm_flags &= ~VM_PFNMAP;
69 70
70 return dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr, 71 ret = dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
71 obj->size, &rk_obj->dma_attrs); 72 obj->size, &rk_obj->dma_attrs);
73 if (ret)
74 drm_gem_vm_close(vma);
75
76 return ret;
72} 77}
73 78
74/* drm driver mmap file operations */ 79int rockchip_gem_mmap_buf(struct drm_gem_object *obj,
75int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma) 80 struct vm_area_struct *vma)
76{ 81{
77 struct drm_file *priv = filp->private_data; 82 struct drm_device *drm = obj->dev;
78 struct drm_device *dev = priv->minor->dev;
79 struct drm_gem_object *obj;
80 struct drm_vma_offset_node *node;
81 int ret; 83 int ret;
82 84
83 if (drm_device_is_unplugged(dev)) 85 mutex_lock(&drm->struct_mutex);
84 return -ENODEV; 86 ret = drm_gem_mmap_obj(obj, obj->size, vma);
87 mutex_unlock(&drm->struct_mutex);
88 if (ret)
89 return ret;
85 90
86 mutex_lock(&dev->struct_mutex); 91 return rockchip_drm_gem_object_mmap(obj, vma);
92}
87 93
88 node = drm_vma_offset_exact_lookup(dev->vma_offset_manager, 94/* drm driver mmap file operations */
89 vma->vm_pgoff, 95int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma)
90 vma_pages(vma)); 96{
91 if (!node) { 97 struct drm_gem_object *obj;
92 mutex_unlock(&dev->struct_mutex); 98 int ret;
93 DRM_ERROR("failed to find vma node.\n");
94 return -EINVAL;
95 } else if (!drm_vma_node_is_allowed(node, filp)) {
96 mutex_unlock(&dev->struct_mutex);
97 return -EACCES;
98 }
99 99
100 obj = container_of(node, struct drm_gem_object, vma_node); 100 ret = drm_gem_mmap(filp, vma);
101 ret = rockchip_gem_mmap_buf(obj, vma); 101 if (ret)
102 return ret;
102 103
103 mutex_unlock(&dev->struct_mutex); 104 obj = vma->vm_private_data;
104 105
105 return ret; 106 return rockchip_drm_gem_object_mmap(obj, vma);
106} 107}
107 108
108struct rockchip_gem_object * 109struct rockchip_gem_object *
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index dc65161d7cad..34b78e736532 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -170,6 +170,7 @@ struct vop_win_phy {
170 170
171 struct vop_reg enable; 171 struct vop_reg enable;
172 struct vop_reg format; 172 struct vop_reg format;
173 struct vop_reg rb_swap;
173 struct vop_reg act_info; 174 struct vop_reg act_info;
174 struct vop_reg dsp_info; 175 struct vop_reg dsp_info;
175 struct vop_reg dsp_st; 176 struct vop_reg dsp_st;
@@ -199,8 +200,12 @@ struct vop_data {
199static const uint32_t formats_01[] = { 200static const uint32_t formats_01[] = {
200 DRM_FORMAT_XRGB8888, 201 DRM_FORMAT_XRGB8888,
201 DRM_FORMAT_ARGB8888, 202 DRM_FORMAT_ARGB8888,
203 DRM_FORMAT_XBGR8888,
204 DRM_FORMAT_ABGR8888,
202 DRM_FORMAT_RGB888, 205 DRM_FORMAT_RGB888,
206 DRM_FORMAT_BGR888,
203 DRM_FORMAT_RGB565, 207 DRM_FORMAT_RGB565,
208 DRM_FORMAT_BGR565,
204 DRM_FORMAT_NV12, 209 DRM_FORMAT_NV12,
205 DRM_FORMAT_NV16, 210 DRM_FORMAT_NV16,
206 DRM_FORMAT_NV24, 211 DRM_FORMAT_NV24,
@@ -209,8 +214,12 @@ static const uint32_t formats_01[] = {
209static const uint32_t formats_234[] = { 214static const uint32_t formats_234[] = {
210 DRM_FORMAT_XRGB8888, 215 DRM_FORMAT_XRGB8888,
211 DRM_FORMAT_ARGB8888, 216 DRM_FORMAT_ARGB8888,
217 DRM_FORMAT_XBGR8888,
218 DRM_FORMAT_ABGR8888,
212 DRM_FORMAT_RGB888, 219 DRM_FORMAT_RGB888,
220 DRM_FORMAT_BGR888,
213 DRM_FORMAT_RGB565, 221 DRM_FORMAT_RGB565,
222 DRM_FORMAT_BGR565,
214}; 223};
215 224
216static const struct vop_win_phy win01_data = { 225static const struct vop_win_phy win01_data = {
@@ -218,6 +227,7 @@ static const struct vop_win_phy win01_data = {
218 .nformats = ARRAY_SIZE(formats_01), 227 .nformats = ARRAY_SIZE(formats_01),
219 .enable = VOP_REG(WIN0_CTRL0, 0x1, 0), 228 .enable = VOP_REG(WIN0_CTRL0, 0x1, 0),
220 .format = VOP_REG(WIN0_CTRL0, 0x7, 1), 229 .format = VOP_REG(WIN0_CTRL0, 0x7, 1),
230 .rb_swap = VOP_REG(WIN0_CTRL0, 0x1, 12),
221 .act_info = VOP_REG(WIN0_ACT_INFO, 0x1fff1fff, 0), 231 .act_info = VOP_REG(WIN0_ACT_INFO, 0x1fff1fff, 0),
222 .dsp_info = VOP_REG(WIN0_DSP_INFO, 0x0fff0fff, 0), 232 .dsp_info = VOP_REG(WIN0_DSP_INFO, 0x0fff0fff, 0),
223 .dsp_st = VOP_REG(WIN0_DSP_ST, 0x1fff1fff, 0), 233 .dsp_st = VOP_REG(WIN0_DSP_ST, 0x1fff1fff, 0),
@@ -234,6 +244,7 @@ static const struct vop_win_phy win23_data = {
234 .nformats = ARRAY_SIZE(formats_234), 244 .nformats = ARRAY_SIZE(formats_234),
235 .enable = VOP_REG(WIN2_CTRL0, 0x1, 0), 245 .enable = VOP_REG(WIN2_CTRL0, 0x1, 0),
236 .format = VOP_REG(WIN2_CTRL0, 0x7, 1), 246 .format = VOP_REG(WIN2_CTRL0, 0x7, 1),
247 .rb_swap = VOP_REG(WIN2_CTRL0, 0x1, 12),
237 .dsp_info = VOP_REG(WIN2_DSP_INFO0, 0x0fff0fff, 0), 248 .dsp_info = VOP_REG(WIN2_DSP_INFO0, 0x0fff0fff, 0),
238 .dsp_st = VOP_REG(WIN2_DSP_ST0, 0x1fff1fff, 0), 249 .dsp_st = VOP_REG(WIN2_DSP_ST0, 0x1fff1fff, 0),
239 .yrgb_mst = VOP_REG(WIN2_MST0, 0xffffffff, 0), 250 .yrgb_mst = VOP_REG(WIN2_MST0, 0xffffffff, 0),
@@ -242,15 +253,6 @@ static const struct vop_win_phy win23_data = {
242 .dst_alpha_ctl = VOP_REG(WIN2_DST_ALPHA_CTRL, 0xff, 0), 253 .dst_alpha_ctl = VOP_REG(WIN2_DST_ALPHA_CTRL, 0xff, 0),
243}; 254};
244 255
245static const struct vop_win_phy cursor_data = {
246 .data_formats = formats_234,
247 .nformats = ARRAY_SIZE(formats_234),
248 .enable = VOP_REG(HWC_CTRL0, 0x1, 0),
249 .format = VOP_REG(HWC_CTRL0, 0x7, 1),
250 .dsp_st = VOP_REG(HWC_DSP_ST, 0x1fff1fff, 0),
251 .yrgb_mst = VOP_REG(HWC_MST, 0xffffffff, 0),
252};
253
254static const struct vop_ctrl ctrl_data = { 256static const struct vop_ctrl ctrl_data = {
255 .standby = VOP_REG(SYS_CTRL, 0x1, 22), 257 .standby = VOP_REG(SYS_CTRL, 0x1, 22),
256 .gate_en = VOP_REG(SYS_CTRL, 0x1, 23), 258 .gate_en = VOP_REG(SYS_CTRL, 0x1, 23),
@@ -282,14 +284,14 @@ static const struct vop_reg_data vop_init_reg_table[] = {
282/* 284/*
283 * Note: rk3288 has a dedicated 'cursor' window, however, that window requires 285 * Note: rk3288 has a dedicated 'cursor' window, however, that window requires
284 * special support to get alpha blending working. For now, just use overlay 286 * special support to get alpha blending working. For now, just use overlay
285 * window 1 for the drm cursor. 287 * window 3 for the drm cursor.
288 *
286 */ 289 */
287static const struct vop_win_data rk3288_vop_win_data[] = { 290static const struct vop_win_data rk3288_vop_win_data[] = {
288 { .base = 0x00, .phy = &win01_data, .type = DRM_PLANE_TYPE_PRIMARY }, 291 { .base = 0x00, .phy = &win01_data, .type = DRM_PLANE_TYPE_PRIMARY },
289 { .base = 0x40, .phy = &win01_data, .type = DRM_PLANE_TYPE_CURSOR }, 292 { .base = 0x40, .phy = &win01_data, .type = DRM_PLANE_TYPE_OVERLAY },
290 { .base = 0x00, .phy = &win23_data, .type = DRM_PLANE_TYPE_OVERLAY }, 293 { .base = 0x00, .phy = &win23_data, .type = DRM_PLANE_TYPE_OVERLAY },
291 { .base = 0x50, .phy = &win23_data, .type = DRM_PLANE_TYPE_OVERLAY }, 294 { .base = 0x50, .phy = &win23_data, .type = DRM_PLANE_TYPE_CURSOR },
292 { .base = 0x00, .phy = &cursor_data, .type = DRM_PLANE_TYPE_OVERLAY },
293}; 295};
294 296
295static const struct vop_data rk3288_vop = { 297static const struct vop_data rk3288_vop = {
@@ -352,15 +354,32 @@ static inline void vop_mask_write_relaxed(struct vop *vop, uint32_t offset,
352 } 354 }
353} 355}
354 356
357static bool has_rb_swapped(uint32_t format)
358{
359 switch (format) {
360 case DRM_FORMAT_XBGR8888:
361 case DRM_FORMAT_ABGR8888:
362 case DRM_FORMAT_BGR888:
363 case DRM_FORMAT_BGR565:
364 return true;
365 default:
366 return false;
367 }
368}
369
355static enum vop_data_format vop_convert_format(uint32_t format) 370static enum vop_data_format vop_convert_format(uint32_t format)
356{ 371{
357 switch (format) { 372 switch (format) {
358 case DRM_FORMAT_XRGB8888: 373 case DRM_FORMAT_XRGB8888:
359 case DRM_FORMAT_ARGB8888: 374 case DRM_FORMAT_ARGB8888:
375 case DRM_FORMAT_XBGR8888:
376 case DRM_FORMAT_ABGR8888:
360 return VOP_FMT_ARGB8888; 377 return VOP_FMT_ARGB8888;
361 case DRM_FORMAT_RGB888: 378 case DRM_FORMAT_RGB888:
379 case DRM_FORMAT_BGR888:
362 return VOP_FMT_RGB888; 380 return VOP_FMT_RGB888;
363 case DRM_FORMAT_RGB565: 381 case DRM_FORMAT_RGB565:
382 case DRM_FORMAT_BGR565:
364 return VOP_FMT_RGB565; 383 return VOP_FMT_RGB565;
365 case DRM_FORMAT_NV12: 384 case DRM_FORMAT_NV12:
366 return VOP_FMT_YUV420SP; 385 return VOP_FMT_YUV420SP;
@@ -378,6 +397,7 @@ static bool is_alpha_support(uint32_t format)
378{ 397{
379 switch (format) { 398 switch (format) {
380 case DRM_FORMAT_ARGB8888: 399 case DRM_FORMAT_ARGB8888:
400 case DRM_FORMAT_ABGR8888:
381 return true; 401 return true;
382 default: 402 default:
383 return false; 403 return false;
@@ -588,6 +608,7 @@ static int vop_update_plane_event(struct drm_plane *plane,
588 enum vop_data_format format; 608 enum vop_data_format format;
589 uint32_t val; 609 uint32_t val;
590 bool is_alpha; 610 bool is_alpha;
611 bool rb_swap;
591 bool visible; 612 bool visible;
592 int ret; 613 int ret;
593 struct drm_rect dest = { 614 struct drm_rect dest = {
@@ -621,6 +642,7 @@ static int vop_update_plane_event(struct drm_plane *plane,
621 return 0; 642 return 0;
622 643
623 is_alpha = is_alpha_support(fb->pixel_format); 644 is_alpha = is_alpha_support(fb->pixel_format);
645 rb_swap = has_rb_swapped(fb->pixel_format);
624 format = vop_convert_format(fb->pixel_format); 646 format = vop_convert_format(fb->pixel_format);
625 if (format < 0) 647 if (format < 0)
626 return format; 648 return format;
@@ -689,6 +711,7 @@ static int vop_update_plane_event(struct drm_plane *plane,
689 val = (dsp_sty - 1) << 16; 711 val = (dsp_sty - 1) << 16;
690 val |= (dsp_stx - 1) & 0xffff; 712 val |= (dsp_stx - 1) & 0xffff;
691 VOP_WIN_SET(vop, win, dsp_st, val); 713 VOP_WIN_SET(vop, win, dsp_st, val);
714 VOP_WIN_SET(vop, win, rb_swap, rb_swap);
692 715
693 if (is_alpha) { 716 if (is_alpha) {
694 VOP_WIN_SET(vop, win, dst_alpha_ctl, 717 VOP_WIN_SET(vop, win, dst_alpha_ctl,
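
The rockchip_drm_vop.c changes above add BGR variants by pairing a red/blue swap flag with the existing hardware format codes. A standalone sketch of that pairing, assuming the DRM fourcc macros are available from the uapi header <drm/drm_fourcc.h>; the enum values here are illustrative, not the hardware encoding:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <drm/drm_fourcc.h>

enum vop_data_format {
	VOP_FMT_ARGB8888,
	VOP_FMT_RGB888,
	VOP_FMT_RGB565,
	VOP_FMT_INVALID = -1,
};

static bool has_rb_swapped(uint32_t format)
{
	switch (format) {
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_BGR888:
	case DRM_FORMAT_BGR565:
		return true;	/* tell the window to swap red and blue */
	default:
		return false;
	}
}

static enum vop_data_format vop_convert_format(uint32_t format)
{
	switch (format) {
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		return VOP_FMT_ARGB8888;
	case DRM_FORMAT_RGB888:
	case DRM_FORMAT_BGR888:
		return VOP_FMT_RGB888;
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_BGR565:
		return VOP_FMT_RGB565;
	default:
		return VOP_FMT_INVALID;
	}
}

int main(void)
{
	uint32_t fmts[] = { DRM_FORMAT_ARGB8888, DRM_FORMAT_ABGR8888, DRM_FORMAT_BGR565 };

	for (unsigned int i = 0; i < sizeof(fmts) / sizeof(fmts[0]); i++)
		printf("fourcc 0x%08x: hw format %d, rb_swap %d\n",
		       (unsigned int)fmts[i], vop_convert_format(fmts[i]),
		       has_rb_swapped(fmts[i]));
	return 0;
}
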
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 882cccdad272..ac6fe40b99f7 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -490,7 +490,8 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
490 else if (boot_cpu_data.x86 > 3) 490 else if (boot_cpu_data.x86 > 3)
491 tmp = pgprot_noncached(tmp); 491 tmp = pgprot_noncached(tmp);
492#endif 492#endif
493#if defined(__ia64__) || defined(__arm__) || defined(__powerpc__) 493#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
494 defined(__powerpc__)
494 if (caching_flags & TTM_PL_FLAG_WC) 495 if (caching_flags & TTM_PL_FLAG_WC)
495 tmp = pgprot_writecombine(tmp); 496 tmp = pgprot_writecombine(tmp);
496 else 497 else
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index 3077f1554099..624d941aaad1 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -963,14 +963,13 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
963 } else { 963 } else {
964 pool->npages_free += count; 964 pool->npages_free += count;
965 list_splice(&ttm_dma->pages_list, &pool->free_list); 965 list_splice(&ttm_dma->pages_list, &pool->free_list);
966 npages = count; 966 /*
967 if (pool->npages_free > _manager->options.max_size) { 967 * Wait to have at at least NUM_PAGES_TO_ALLOC number of pages
968 * to free in order to minimize calls to set_memory_wb().
969 */
970 if (pool->npages_free >= (_manager->options.max_size +
971 NUM_PAGES_TO_ALLOC))
968 npages = pool->npages_free - _manager->options.max_size; 972 npages = pool->npages_free - _manager->options.max_size;
969 /* free at least NUM_PAGES_TO_ALLOC number of pages
970 * to reduce calls to set_memory_wb */
971 if (npages < NUM_PAGES_TO_ALLOC)
972 npages = NUM_PAGES_TO_ALLOC;
973 }
974 } 973 }
975 spin_unlock_irqrestore(&pool->lock, irq_flags); 974 spin_unlock_irqrestore(&pool->lock, irq_flags);
976 975
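
The comment added in the hunk above explains the new trimming rule: only shrink the free pool once it exceeds its limit by a whole allocation batch, so the expensive per-trim work (set_memory_wb() in the driver) runs less often. A small sketch of that rule with made-up pool sizes:

#include <stdio.h>

#define NUM_PAGES_TO_ALLOC 64	/* stand-in for the driver's batch size */

static unsigned int pages_to_trim(unsigned int npages_free, unsigned int max_size)
{
	/* Do nothing until the pool is a full batch above its soft limit. */
	if (npages_free < max_size + NUM_PAGES_TO_ALLOC)
		return 0;
	/* Then trim back down to the limit in one go. */
	return npages_free - max_size;
}

int main(void)
{
	unsigned int max_size = 256;
	unsigned int samples[] = { 200, 300, 319, 320, 400 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("free=%u: trim %u pages\n",
		       samples[i], pages_to_trim(samples[i], max_size));
	return 0;
}
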
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 654c8daeb5ab..97ad3bcb99a7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -2492,7 +2492,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
2492 ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes, 2492 ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
2493 true, NULL); 2493 true, NULL);
2494 if (unlikely(ret != 0)) 2494 if (unlikely(ret != 0))
2495 goto out_err; 2495 goto out_err_nores;
2496 2496
2497 ret = vmw_validate_buffers(dev_priv, sw_context); 2497 ret = vmw_validate_buffers(dev_priv, sw_context);
2498 if (unlikely(ret != 0)) 2498 if (unlikely(ret != 0))
@@ -2536,6 +2536,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
2536 vmw_resource_relocations_free(&sw_context->res_relocations); 2536 vmw_resource_relocations_free(&sw_context->res_relocations);
2537 2537
2538 vmw_fifo_commit(dev_priv, command_size); 2538 vmw_fifo_commit(dev_priv, command_size);
2539 mutex_unlock(&dev_priv->binding_mutex);
2539 2540
2540 vmw_query_bo_switch_commit(dev_priv, sw_context); 2541 vmw_query_bo_switch_commit(dev_priv, sw_context);
2541 ret = vmw_execbuf_fence_commands(file_priv, dev_priv, 2542 ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
@@ -2551,7 +2552,6 @@ int vmw_execbuf_process(struct drm_file *file_priv,
2551 DRM_ERROR("Fence submission error. Syncing.\n"); 2552 DRM_ERROR("Fence submission error. Syncing.\n");
2552 2553
2553 vmw_resource_list_unreserve(&sw_context->resource_list, false); 2554 vmw_resource_list_unreserve(&sw_context->resource_list, false);
2554 mutex_unlock(&dev_priv->binding_mutex);
2555 2555
2556 ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes, 2556 ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
2557 (void *) fence); 2557 (void *) fence);
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index 6d2f39d36e44..00f2058944e5 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -1107,6 +1107,9 @@ static int ipu_irq_init(struct ipu_soc *ipu)
1107 return ret; 1107 return ret;
1108 } 1108 }
1109 1109
1110 for (i = 0; i < IPU_NUM_IRQS; i += 32)
1111 ipu_cm_write(ipu, 0, IPU_INT_CTRL(i / 32));
1112
1110 for (i = 0; i < IPU_NUM_IRQS; i += 32) { 1113 for (i = 0; i < IPU_NUM_IRQS; i += 32) {
1111 gc = irq_get_domain_generic_chip(ipu->domain, i); 1114 gc = irq_get_domain_generic_chip(ipu->domain, i);
1112 gc->reg_base = ipu->cm_reg; 1115 gc->reg_base = ipu->cm_reg;
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index f822fd2a1ada..884d82f9190e 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -546,6 +546,12 @@ static const struct hid_device_id apple_devices[] = {
546 .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD }, 546 .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
547 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS), 547 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS),
548 .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS }, 548 .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
549 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI),
550 .driver_data = APPLE_HAS_FN },
551 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO),
552 .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
553 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS),
554 .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
549 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI), 555 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI),
550 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN }, 556 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
551 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO), 557 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO),
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 157c62775053..e6fce23b121a 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1782,6 +1782,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
1782 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI) }, 1782 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI) },
1783 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO) }, 1783 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO) },
1784 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS) }, 1784 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS) },
1785 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI) },
1786 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO) },
1787 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS) },
1785 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) }, 1788 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) },
1786 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) }, 1789 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) },
1787 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) }, 1790 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
@@ -2463,6 +2466,9 @@ static const struct hid_device_id hid_mouse_ignore_list[] = {
2463 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI) }, 2466 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI) },
2464 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO) }, 2467 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO) },
2465 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS) }, 2468 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS) },
2469 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI) },
2470 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO) },
2471 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS) },
2466 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) }, 2472 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
2467 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) }, 2473 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
2468 { } 2474 { }
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index 3318de690e00..a2dbbbe0d8d7 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -356,6 +356,8 @@ static int cp2112_read(struct cp2112_device *dev, u8 *data, size_t size)
356 struct cp2112_force_read_report report; 356 struct cp2112_force_read_report report;
357 int ret; 357 int ret;
358 358
359 if (size > sizeof(dev->read_data))
360 size = sizeof(dev->read_data);
359 report.report = CP2112_DATA_READ_FORCE_SEND; 361 report.report = CP2112_DATA_READ_FORCE_SEND;
360 report.length = cpu_to_be16(size); 362 report.length = cpu_to_be16(size);
361 363
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index b04b0820d816..b3b225b75d0a 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -142,6 +142,9 @@
142#define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290 142#define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290
143#define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291 143#define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291
144#define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292 144#define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292
145#define USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI 0x0272
146#define USB_DEVICE_ID_APPLE_WELLSPRING9_ISO 0x0273
147#define USB_DEVICE_ID_APPLE_WELLSPRING9_JIS 0x0274
145#define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a 148#define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a
146#define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b 149#define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b
147#define USB_DEVICE_ID_APPLE_IRCONTROL 0x8240 150#define USB_DEVICE_ID_APPLE_IRCONTROL 0x8240
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 3511bbaba505..e3c63640df73 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -462,12 +462,15 @@ out:
462 462
463static void hidinput_cleanup_battery(struct hid_device *dev) 463static void hidinput_cleanup_battery(struct hid_device *dev)
464{ 464{
465 const struct power_supply_desc *psy_desc;
466
465 if (!dev->battery) 467 if (!dev->battery)
466 return; 468 return;
467 469
470 psy_desc = dev->battery->desc;
468 power_supply_unregister(dev->battery); 471 power_supply_unregister(dev->battery);
469 kfree(dev->battery->desc->name); 472 kfree(psy_desc->name);
470 kfree(dev->battery->desc); 473 kfree(psy_desc);
471 dev->battery = NULL; 474 dev->battery = NULL;
472} 475}
473#else /* !CONFIG_HID_BATTERY_STRENGTH */ 476#else /* !CONFIG_HID_BATTERY_STRENGTH */
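
The hid-input.c fix above saves the descriptor pointer before the call that can free the battery object holding it. A generic sketch of that ordering (plain C stand-ins, not the HID or power-supply API):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct desc { char *name; };
struct battery { struct desc *desc; };

/* Stand-in for power_supply_unregister(): may free the battery itself. */
static void battery_unregister(struct battery *bat)
{
	free(bat);
}

static void cleanup_battery(struct battery **batp)
{
	struct battery *bat = *batp;
	struct desc *d;

	if (!bat)
		return;

	d = bat->desc;			/* save before bat can be freed */
	battery_unregister(bat);	/* bat must not be dereferenced past here */
	free(d->name);
	free(d);
	*batp = NULL;
}

int main(void)
{
	struct desc *d = malloc(sizeof(*d));
	struct battery *bat = malloc(sizeof(*bat));

	d->name = strdup("hid-battery");
	bat->desc = d;
	cleanup_battery(&bat);
	printf("battery pointer after cleanup: %p\n", (void *)bat);
	return 0;
}
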
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 6a9b05b328a9..7c811252c1ce 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -778,9 +778,16 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
778 /* 778 /*
779 * some egalax touchscreens have "application == HID_DG_TOUCHSCREEN" 779 * some egalax touchscreens have "application == HID_DG_TOUCHSCREEN"
780 * for the stylus. 780 * for the stylus.
781 * The check for mt_report_id ensures we don't process
782 * HID_DG_CONTACTCOUNT from the pen report as it is outside the physical
783 * collection, but within the report ID.
781 */ 784 */
782 if (field->physical == HID_DG_STYLUS) 785 if (field->physical == HID_DG_STYLUS)
783 return 0; 786 return 0;
787 else if ((field->physical == 0) &&
788 (field->report->id != td->mt_report_id) &&
789 (td->mt_report_id != -1))
790 return 0;
784 791
785 if (field->application == HID_DG_TOUCHSCREEN || 792 if (field->application == HID_DG_TOUCHSCREEN ||
786 field->application == HID_DG_TOUCHPAD) 793 field->application == HID_DG_TOUCHPAD)
diff --git a/drivers/hid/hid-uclogic.c b/drivers/hid/hid-uclogic.c
index 94167310e15a..b905d501e752 100644
--- a/drivers/hid/hid-uclogic.c
+++ b/drivers/hid/hid-uclogic.c
@@ -858,7 +858,7 @@ static int uclogic_tablet_enable(struct hid_device *hdev)
858 for (p = drvdata->rdesc; 858 for (p = drvdata->rdesc;
859 p <= drvdata->rdesc + drvdata->rsize - 4;) { 859 p <= drvdata->rdesc + drvdata->rsize - 4;) {
860 if (p[0] == 0xFE && p[1] == 0xED && p[2] == 0x1D && 860 if (p[0] == 0xFE && p[1] == 0xED && p[2] == 0x1D &&
861 p[3] < sizeof(params)) { 861 p[3] < ARRAY_SIZE(params)) {
862 v = params[p[3]]; 862 v = params[p[3]];
863 put_unaligned(cpu_to_le32(v), (s32 *)p); 863 put_unaligned(cpu_to_le32(v), (s32 *)p);
864 p += 4; 864 p += 4;
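
The uclogic fix above bounds the index by element count rather than byte count. A short sketch of the difference for a 32-bit parameter table (values are made up):

#include <stdint.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
	/* Hypothetical tuning table of 32-bit parameters. */
	int32_t params[6] = { 1, 2, 3, 4, 5, 6 };

	printf("sizeof(params)     = %zu (bytes)\n", sizeof(params));
	printf("ARRAY_SIZE(params) = %zu (elements)\n", ARRAY_SIZE(params));
	/* An index checked against sizeof() could reach params[23] here,
	 * four times past the last valid element. */
	return 0;
}
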
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 53e7de7cb9e2..20f9a653444c 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -87,6 +87,9 @@ static const struct hid_blacklist {
87 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C05A, HID_QUIRK_ALWAYS_POLL }, 87 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C05A, HID_QUIRK_ALWAYS_POLL },
88 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C06A, HID_QUIRK_ALWAYS_POLL }, 88 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C06A, HID_QUIRK_ALWAYS_POLL },
89 { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET }, 89 { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
90 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE_PRO_2, HID_QUIRK_NO_INIT_REPORTS },
91 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_2, HID_QUIRK_NO_INIT_REPORTS },
92 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2, HID_QUIRK_NO_INIT_REPORTS },
90 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3, HID_QUIRK_NO_INIT_REPORTS }, 93 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3, HID_QUIRK_NO_INIT_REPORTS },
91 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3_JP, HID_QUIRK_NO_INIT_REPORTS }, 94 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3_JP, HID_QUIRK_NO_INIT_REPORTS },
92 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER, HID_QUIRK_NO_INIT_REPORTS }, 95 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER, HID_QUIRK_NO_INIT_REPORTS },
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 4c0ffca97bef..01b937e63cf3 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -1271,17 +1271,52 @@ fail_leds:
1271 pad_input_dev = NULL; 1271 pad_input_dev = NULL;
1272 wacom_wac->pad_registered = false; 1272 wacom_wac->pad_registered = false;
1273fail_register_pad_input: 1273fail_register_pad_input:
1274 input_unregister_device(touch_input_dev); 1274 if (touch_input_dev)
1275 input_unregister_device(touch_input_dev);
1275 wacom_wac->touch_input = NULL; 1276 wacom_wac->touch_input = NULL;
1276 wacom_wac->touch_registered = false; 1277 wacom_wac->touch_registered = false;
1277fail_register_touch_input: 1278fail_register_touch_input:
1278 input_unregister_device(pen_input_dev); 1279 if (pen_input_dev)
1280 input_unregister_device(pen_input_dev);
1279 wacom_wac->pen_input = NULL; 1281 wacom_wac->pen_input = NULL;
1280 wacom_wac->pen_registered = false; 1282 wacom_wac->pen_registered = false;
1281fail_register_pen_input: 1283fail_register_pen_input:
1282 return error; 1284 return error;
1283} 1285}
1284 1286
1287/*
1288 * Not all devices report physical dimensions from HID.
1289 * Compute the default from hardcoded logical dimension
1290 * and resolution before driver overwrites them.
1291 */
1292static void wacom_set_default_phy(struct wacom_features *features)
1293{
1294 if (features->x_resolution) {
1295 features->x_phy = (features->x_max * 100) /
1296 features->x_resolution;
1297 features->y_phy = (features->y_max * 100) /
1298 features->y_resolution;
1299 }
1300}
1301
1302static void wacom_calculate_res(struct wacom_features *features)
1303{
1304 /* set unit to "100th of a mm" for devices not reported by HID */
1305 if (!features->unit) {
1306 features->unit = 0x11;
1307 features->unitExpo = -3;
1308 }
1309
1310 features->x_resolution = wacom_calc_hid_res(features->x_max,
1311 features->x_phy,
1312 features->unit,
1313 features->unitExpo);
1314 features->y_resolution = wacom_calc_hid_res(features->y_max,
1315 features->y_phy,
1316 features->unit,
1317 features->unitExpo);
1318}
1319
1285static void wacom_wireless_work(struct work_struct *work) 1320static void wacom_wireless_work(struct work_struct *work)
1286{ 1321{
1287 struct wacom *wacom = container_of(work, struct wacom, work); 1322 struct wacom *wacom = container_of(work, struct wacom, work);
@@ -1339,6 +1374,8 @@ static void wacom_wireless_work(struct work_struct *work)
1339 if (wacom_wac1->features.type != INTUOSHT && 1374 if (wacom_wac1->features.type != INTUOSHT &&
1340 wacom_wac1->features.type != BAMBOO_PT) 1375 wacom_wac1->features.type != BAMBOO_PT)
1341 wacom_wac1->features.device_type |= WACOM_DEVICETYPE_PAD; 1376 wacom_wac1->features.device_type |= WACOM_DEVICETYPE_PAD;
1377 wacom_set_default_phy(&wacom_wac1->features);
1378 wacom_calculate_res(&wacom_wac1->features);
1342 snprintf(wacom_wac1->pen_name, WACOM_NAME_MAX, "%s (WL) Pen", 1379 snprintf(wacom_wac1->pen_name, WACOM_NAME_MAX, "%s (WL) Pen",
1343 wacom_wac1->features.name); 1380 wacom_wac1->features.name);
1344 snprintf(wacom_wac1->pad_name, WACOM_NAME_MAX, "%s (WL) Pad", 1381 snprintf(wacom_wac1->pad_name, WACOM_NAME_MAX, "%s (WL) Pad",
@@ -1357,7 +1394,9 @@ static void wacom_wireless_work(struct work_struct *work)
1357 wacom_wac2->features = 1394 wacom_wac2->features =
1358 *((struct wacom_features *)id->driver_data); 1395 *((struct wacom_features *)id->driver_data);
1359 wacom_wac2->features.pktlen = WACOM_PKGLEN_BBTOUCH3; 1396 wacom_wac2->features.pktlen = WACOM_PKGLEN_BBTOUCH3;
1397 wacom_set_default_phy(&wacom_wac2->features);
1360 wacom_wac2->features.x_max = wacom_wac2->features.y_max = 4096; 1398 wacom_wac2->features.x_max = wacom_wac2->features.y_max = 4096;
1399 wacom_calculate_res(&wacom_wac2->features);
1361 snprintf(wacom_wac2->touch_name, WACOM_NAME_MAX, 1400 snprintf(wacom_wac2->touch_name, WACOM_NAME_MAX,
1362 "%s (WL) Finger",wacom_wac2->features.name); 1401 "%s (WL) Finger",wacom_wac2->features.name);
1363 snprintf(wacom_wac2->pad_name, WACOM_NAME_MAX, 1402 snprintf(wacom_wac2->pad_name, WACOM_NAME_MAX,
@@ -1405,39 +1444,6 @@ void wacom_battery_work(struct work_struct *work)
1405 } 1444 }
1406} 1445}
1407 1446
1408/*
1409 * Not all devices report physical dimensions from HID.
1410 * Compute the default from hardcoded logical dimension
1411 * and resolution before driver overwrites them.
1412 */
1413static void wacom_set_default_phy(struct wacom_features *features)
1414{
1415 if (features->x_resolution) {
1416 features->x_phy = (features->x_max * 100) /
1417 features->x_resolution;
1418 features->y_phy = (features->y_max * 100) /
1419 features->y_resolution;
1420 }
1421}
1422
1423static void wacom_calculate_res(struct wacom_features *features)
1424{
1425 /* set unit to "100th of a mm" for devices not reported by HID */
1426 if (!features->unit) {
1427 features->unit = 0x11;
1428 features->unitExpo = -3;
1429 }
1430
1431 features->x_resolution = wacom_calc_hid_res(features->x_max,
1432 features->x_phy,
1433 features->unit,
1434 features->unitExpo);
1435 features->y_resolution = wacom_calc_hid_res(features->y_max,
1436 features->y_phy,
1437 features->unit,
1438 features->unitExpo);
1439}
1440
1441static size_t wacom_compute_pktlen(struct hid_device *hdev) 1447static size_t wacom_compute_pktlen(struct hid_device *hdev)
1442{ 1448{
1443 struct hid_report_enum *report_enum; 1449 struct hid_report_enum *report_enum;
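
wacom_set_default_phy(), moved above so the wireless path can reuse it, derives a default physical size from the logical extent and resolution. A standalone sketch with hypothetical numbers (not real tablet data):

#include <stdio.h>

struct wacom_features {
	int x_max, y_max;
	int x_phy, y_phy;
	int x_resolution, y_resolution;
};

/* Mirrors the relocated helper: derive a default physical size from the
 * logical extent and resolution when the hardware did not report one. */
static void wacom_set_default_phy(struct wacom_features *f)
{
	if (f->x_resolution) {
		f->x_phy = (f->x_max * 100) / f->x_resolution;
		f->y_phy = (f->y_max * 100) / f->y_resolution;
	}
}

int main(void)
{
	/* Hypothetical tablet: logical extent and resolution are made up. */
	struct wacom_features f = {
		.x_max = 21648, .y_max = 13530,
		.x_resolution = 100, .y_resolution = 100,
	};

	wacom_set_default_phy(&f);
	printf("default phy: %d x %d\n", f.x_phy, f.y_phy);
	return 0;
}
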
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 232da89f4e88..0d244239e55d 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -2213,6 +2213,9 @@ void wacom_setup_device_quirks(struct wacom *wacom)
2213 features->x_max = 4096; 2213 features->x_max = 4096;
2214 features->y_max = 4096; 2214 features->y_max = 4096;
2215 } 2215 }
2216 else if (features->pktlen == WACOM_PKGLEN_BBTOUCH) {
2217 features->device_type |= WACOM_DEVICETYPE_PAD;
2218 }
2216 } 2219 }
2217 2220
2218 /* 2221 /*
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index 37c16afe007a..c8487894b312 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -929,6 +929,21 @@ static struct dmi_system_id i8k_dmi_table[] __initdata = {
929 929
930MODULE_DEVICE_TABLE(dmi, i8k_dmi_table); 930MODULE_DEVICE_TABLE(dmi, i8k_dmi_table);
931 931
932static struct dmi_system_id i8k_blacklist_dmi_table[] __initdata = {
933 {
934 /*
935 * CPU fan speed going up and down on Dell Studio XPS 8100
936 * for unknown reasons.
937 */
938 .ident = "Dell Studio XPS 8100",
939 .matches = {
940 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
941 DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Studio XPS 8100"),
942 },
943 },
944 { }
945};
946
932/* 947/*
933 * Probe for the presence of a supported laptop. 948 * Probe for the presence of a supported laptop.
934 */ 949 */
@@ -940,7 +955,8 @@ static int __init i8k_probe(void)
940 /* 955 /*
941 * Get DMI information 956 * Get DMI information
942 */ 957 */
943 if (!dmi_check_system(i8k_dmi_table)) { 958 if (!dmi_check_system(i8k_dmi_table) ||
959 dmi_check_system(i8k_blacklist_dmi_table)) {
944 if (!ignore_dmi && !force) 960 if (!ignore_dmi && !force)
945 return -ENODEV; 961 return -ENODEV;
946 962
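
The dell-smm-hwmon change above gates the probe on being in the supported DMI table and not in the new blacklist. A simplified sketch of that gate; the product strings and the single force flag are stand-ins for the real DMI matching and the driver's force/ignore_dmi parameters:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static const char *const supported[] = { "Inspiron", "Latitude", "Studio XPS 8100", NULL };
static const char *const blacklisted[] = { "Studio XPS 8100", NULL };

/* Trivial stand-in for dmi_check_system(): match on product name only. */
static bool dmi_matches(const char *const *table, const char *product)
{
	for (int i = 0; table[i]; i++)
		if (!strcmp(table[i], product))
			return true;
	return false;
}

static int i8k_probe(const char *product, bool force)
{
	if (!dmi_matches(supported, product) ||
	    dmi_matches(blacklisted, product)) {
		if (!force)
			return -1;	/* -ENODEV in the driver */
	}
	return 0;
}

int main(void)
{
	printf("Latitude:        %d\n", i8k_probe("Latitude", false));
	printf("Studio XPS 8100: %d\n", i8k_probe("Studio XPS 8100", false));
	printf("forced:          %d\n", i8k_probe("Studio XPS 8100", true));
	return 0;
}
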
diff --git a/drivers/hwmon/g762.c b/drivers/hwmon/g762.c
index 9b55e673b67c..85d106fe3ce8 100644
--- a/drivers/hwmon/g762.c
+++ b/drivers/hwmon/g762.c
@@ -582,6 +582,7 @@ static const struct of_device_id g762_dt_match[] = {
582 { .compatible = "gmt,g763" }, 582 { .compatible = "gmt,g763" },
583 { }, 583 { },
584}; 584};
585MODULE_DEVICE_TABLE(of, g762_dt_match);
585 586
586/* 587/*
587 * Grab clock (a required property), enable it, get (fixed) clock frequency 588 * Grab clock (a required property), enable it, get (fixed) clock frequency
diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c
index 28fcb2e246d5..fbfc02bb2cfa 100644
--- a/drivers/hwmon/nct7802.c
+++ b/drivers/hwmon/nct7802.c
@@ -195,7 +195,7 @@ abort:
195} 195}
196 196
197static int nct7802_write_voltage(struct nct7802_data *data, int nr, int index, 197static int nct7802_write_voltage(struct nct7802_data *data, int nr, int index,
198 unsigned int voltage) 198 unsigned long voltage)
199{ 199{
200 int shift = 8 - REG_VOLTAGE_LIMIT_MSB_SHIFT[index - 1][nr]; 200 int shift = 8 - REG_VOLTAGE_LIMIT_MSB_SHIFT[index - 1][nr];
201 int err; 201 int err;
diff --git a/drivers/hwmon/nct7904.c b/drivers/hwmon/nct7904.c
index b77b82f24480..08ff89d222e5 100644
--- a/drivers/hwmon/nct7904.c
+++ b/drivers/hwmon/nct7904.c
@@ -412,8 +412,9 @@ static ssize_t show_pwm(struct device *dev,
412 return sprintf(buf, "%d\n", val); 412 return sprintf(buf, "%d\n", val);
413} 413}
414 414
415static ssize_t store_mode(struct device *dev, struct device_attribute *devattr, 415static ssize_t store_enable(struct device *dev,
416 const char *buf, size_t count) 416 struct device_attribute *devattr,
417 const char *buf, size_t count)
417{ 418{
418 int index = to_sensor_dev_attr(devattr)->index; 419 int index = to_sensor_dev_attr(devattr)->index;
419 struct nct7904_data *data = dev_get_drvdata(dev); 420 struct nct7904_data *data = dev_get_drvdata(dev);
@@ -422,18 +423,18 @@ static ssize_t store_mode(struct device *dev, struct device_attribute *devattr,
422 423
423 if (kstrtoul(buf, 10, &val) < 0) 424 if (kstrtoul(buf, 10, &val) < 0)
424 return -EINVAL; 425 return -EINVAL;
425 if (val > 1 || (val && !data->fan_mode[index])) 426 if (val < 1 || val > 2 || (val == 2 && !data->fan_mode[index]))
426 return -EINVAL; 427 return -EINVAL;
427 428
428 ret = nct7904_write_reg(data, BANK_3, FANCTL1_FMR_REG + index, 429 ret = nct7904_write_reg(data, BANK_3, FANCTL1_FMR_REG + index,
429 val ? data->fan_mode[index] : 0); 430 val == 2 ? data->fan_mode[index] : 0);
430 431
431 return ret ? ret : count; 432 return ret ? ret : count;
432} 433}
433 434
434/* Return 0 for manual mode or 1 for SmartFan mode */ 435/* Return 1 for manual mode or 2 for SmartFan mode */
435static ssize_t show_mode(struct device *dev, 436static ssize_t show_enable(struct device *dev,
436 struct device_attribute *devattr, char *buf) 437 struct device_attribute *devattr, char *buf)
437{ 438{
438 int index = to_sensor_dev_attr(devattr)->index; 439 int index = to_sensor_dev_attr(devattr)->index;
439 struct nct7904_data *data = dev_get_drvdata(dev); 440 struct nct7904_data *data = dev_get_drvdata(dev);
@@ -443,36 +444,36 @@ static ssize_t show_mode(struct device *dev,
443 if (val < 0) 444 if (val < 0)
444 return val; 445 return val;
445 446
446 return sprintf(buf, "%d\n", val ? 1 : 0); 447 return sprintf(buf, "%d\n", val ? 2 : 1);
447} 448}
448 449
449/* 2 attributes per channel: pwm and mode */ 450/* 2 attributes per channel: pwm and mode */
450static SENSOR_DEVICE_ATTR(fan1_pwm, S_IRUGO | S_IWUSR, 451static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR,
451 show_pwm, store_pwm, 0); 452 show_pwm, store_pwm, 0);
452static SENSOR_DEVICE_ATTR(fan1_mode, S_IRUGO | S_IWUSR, 453static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR,
453 show_mode, store_mode, 0); 454 show_enable, store_enable, 0);
454static SENSOR_DEVICE_ATTR(fan2_pwm, S_IRUGO | S_IWUSR, 455static SENSOR_DEVICE_ATTR(pwm2, S_IRUGO | S_IWUSR,
455 show_pwm, store_pwm, 1); 456 show_pwm, store_pwm, 1);
456static SENSOR_DEVICE_ATTR(fan2_mode, S_IRUGO | S_IWUSR, 457static SENSOR_DEVICE_ATTR(pwm2_enable, S_IRUGO | S_IWUSR,
457 show_mode, store_mode, 1); 458 show_enable, store_enable, 1);
458static SENSOR_DEVICE_ATTR(fan3_pwm, S_IRUGO | S_IWUSR, 459static SENSOR_DEVICE_ATTR(pwm3, S_IRUGO | S_IWUSR,
459 show_pwm, store_pwm, 2); 460 show_pwm, store_pwm, 2);
460static SENSOR_DEVICE_ATTR(fan3_mode, S_IRUGO | S_IWUSR, 461static SENSOR_DEVICE_ATTR(pwm3_enable, S_IRUGO | S_IWUSR,
461 show_mode, store_mode, 2); 462 show_enable, store_enable, 2);
462static SENSOR_DEVICE_ATTR(fan4_pwm, S_IRUGO | S_IWUSR, 463static SENSOR_DEVICE_ATTR(pwm4, S_IRUGO | S_IWUSR,
463 show_pwm, store_pwm, 3); 464 show_pwm, store_pwm, 3);
464static SENSOR_DEVICE_ATTR(fan4_mode, S_IRUGO | S_IWUSR, 465static SENSOR_DEVICE_ATTR(pwm4_enable, S_IRUGO | S_IWUSR,
465 show_mode, store_mode, 3); 466 show_enable, store_enable, 3);
466 467
467static struct attribute *nct7904_fanctl_attrs[] = { 468static struct attribute *nct7904_fanctl_attrs[] = {
468 &sensor_dev_attr_fan1_pwm.dev_attr.attr, 469 &sensor_dev_attr_pwm1.dev_attr.attr,
469 &sensor_dev_attr_fan1_mode.dev_attr.attr, 470 &sensor_dev_attr_pwm1_enable.dev_attr.attr,
470 &sensor_dev_attr_fan2_pwm.dev_attr.attr, 471 &sensor_dev_attr_pwm2.dev_attr.attr,
471 &sensor_dev_attr_fan2_mode.dev_attr.attr, 472 &sensor_dev_attr_pwm2_enable.dev_attr.attr,
472 &sensor_dev_attr_fan3_pwm.dev_attr.attr, 473 &sensor_dev_attr_pwm3.dev_attr.attr,
473 &sensor_dev_attr_fan3_mode.dev_attr.attr, 474 &sensor_dev_attr_pwm3_enable.dev_attr.attr,
474 &sensor_dev_attr_fan4_pwm.dev_attr.attr, 475 &sensor_dev_attr_pwm4.dev_attr.attr,
475 &sensor_dev_attr_fan4_mode.dev_attr.attr, 476 &sensor_dev_attr_pwm4_enable.dev_attr.attr,
476 NULL 477 NULL
477}; 478};
478 479
@@ -574,6 +575,7 @@ static const struct i2c_device_id nct7904_id[] = {
574 {"nct7904", 0}, 575 {"nct7904", 0},
575 {} 576 {}
576}; 577};
578MODULE_DEVICE_TABLE(i2c, nct7904_id);
577 579
578static struct i2c_driver nct7904_driver = { 580static struct i2c_driver nct7904_driver = {
579 .class = I2C_CLASS_HWMON, 581 .class = I2C_CLASS_HWMON,
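
The nct7904 rename above moves the driver to the standard hwmon sysfs names, where pwmN takes a 0-255 duty cycle and pwmN_enable takes 1 for manual or 2 for the chip's SmartFan mode. A sketch of how userspace would drive it; the hwmon path is hypothetical and varies per system:

#include <stdio.h>

static int write_attr(const char *path, int val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return -1;
	}
	fprintf(f, "%d\n", val);
	return fclose(f);
}

int main(void)
{
	const char *base = "/sys/class/hwmon/hwmon2";	/* hypothetical index */
	char path[128];

	/* Take fan 1 out of SmartFan mode and set roughly 50% duty cycle. */
	snprintf(path, sizeof(path), "%s/pwm1_enable", base);
	write_attr(path, 1);
	snprintf(path, sizeof(path), "%s/pwm1", base);
	write_attr(path, 128);
	return 0;
}
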
diff --git a/drivers/i2c/busses/i2c-bfin-twi.c b/drivers/i2c/busses/i2c-bfin-twi.c
index af162b4c7a6d..025686d41640 100644
--- a/drivers/i2c/busses/i2c-bfin-twi.c
+++ b/drivers/i2c/busses/i2c-bfin-twi.c
@@ -692,7 +692,7 @@ static int i2c_bfin_twi_probe(struct platform_device *pdev)
692 692
693 platform_set_drvdata(pdev, iface); 693 platform_set_drvdata(pdev, iface);
694 694
695 dev_info(&pdev->dev, "Blackfin BF5xx on-chip I2C TWI Contoller, " 695 dev_info(&pdev->dev, "Blackfin BF5xx on-chip I2C TWI Controller, "
696 "regs_base@%p\n", iface->regs_base); 696 "regs_base@%p\n", iface->regs_base);
697 697
698 return 0; 698 return 0;
@@ -735,6 +735,6 @@ subsys_initcall(i2c_bfin_twi_init);
735module_exit(i2c_bfin_twi_exit); 735module_exit(i2c_bfin_twi_exit);
736 736
737MODULE_AUTHOR("Bryan Wu, Sonic Zhang"); 737MODULE_AUTHOR("Bryan Wu, Sonic Zhang");
738MODULE_DESCRIPTION("Blackfin BF5xx on-chip I2C TWI Contoller Driver"); 738MODULE_DESCRIPTION("Blackfin BF5xx on-chip I2C TWI Controller Driver");
739MODULE_LICENSE("GPL"); 739MODULE_LICENSE("GPL");
740MODULE_ALIAS("platform:i2c-bfin-twi"); 740MODULE_ALIAS("platform:i2c-bfin-twi");
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index d1c22e3fdd14..fc9bf7f30e35 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -1247,7 +1247,14 @@ static void omap_i2c_prepare_recovery(struct i2c_adapter *adap)
1247 u32 reg; 1247 u32 reg;
1248 1248
1249 reg = omap_i2c_read_reg(dev, OMAP_I2C_SYSTEST_REG); 1249 reg = omap_i2c_read_reg(dev, OMAP_I2C_SYSTEST_REG);
1250 /* enable test mode */
1250 reg |= OMAP_I2C_SYSTEST_ST_EN; 1251 reg |= OMAP_I2C_SYSTEST_ST_EN;
1252 /* select SDA/SCL IO mode */
1253 reg |= 3 << OMAP_I2C_SYSTEST_TMODE_SHIFT;
1254 /* set SCL to high-impedance state (reset value is 0) */
1255 reg |= OMAP_I2C_SYSTEST_SCL_O;
1256 /* set SDA to high-impedance state (reset value is 0) */
1257 reg |= OMAP_I2C_SYSTEST_SDA_O;
1251 omap_i2c_write_reg(dev, OMAP_I2C_SYSTEST_REG, reg); 1258 omap_i2c_write_reg(dev, OMAP_I2C_SYSTEST_REG, reg);
1252} 1259}
1253 1260
@@ -1257,7 +1264,11 @@ static void omap_i2c_unprepare_recovery(struct i2c_adapter *adap)
1257 u32 reg; 1264 u32 reg;
1258 1265
1259 reg = omap_i2c_read_reg(dev, OMAP_I2C_SYSTEST_REG); 1266 reg = omap_i2c_read_reg(dev, OMAP_I2C_SYSTEST_REG);
1267 /* restore reset values */
1260 reg &= ~OMAP_I2C_SYSTEST_ST_EN; 1268 reg &= ~OMAP_I2C_SYSTEST_ST_EN;
1269 reg &= ~OMAP_I2C_SYSTEST_TMODE_MASK;
1270 reg &= ~OMAP_I2C_SYSTEST_SCL_O;
1271 reg &= ~OMAP_I2C_SYSTEST_SDA_O;
1261 omap_i2c_write_reg(dev, OMAP_I2C_SYSTEST_REG, reg); 1272 omap_i2c_write_reg(dev, OMAP_I2C_SYSTEST_REG, reg);
1262} 1273}
1263 1274
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index e6d4935161e4..c83e4d13cfc5 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -567,6 +567,9 @@ static int i2c_generic_recovery(struct i2c_adapter *adap)
567 if (bri->prepare_recovery) 567 if (bri->prepare_recovery)
568 bri->prepare_recovery(adap); 568 bri->prepare_recovery(adap);
569 569
570 bri->set_scl(adap, val);
571 ndelay(RECOVERY_NDELAY);
572
570 /* 573 /*
571 * By this time SCL is high, as we need to give 9 falling-rising edges 574 * By this time SCL is high, as we need to give 9 falling-rising edges
572 */ 575 */
@@ -597,7 +600,6 @@ static int i2c_generic_recovery(struct i2c_adapter *adap)
597 600
598int i2c_generic_scl_recovery(struct i2c_adapter *adap) 601int i2c_generic_scl_recovery(struct i2c_adapter *adap)
599{ 602{
600 adap->bus_recovery_info->set_scl(adap, 1);
601 return i2c_generic_recovery(adap); 603 return i2c_generic_recovery(adap);
602} 604}
603EXPORT_SYMBOL_GPL(i2c_generic_scl_recovery); 605EXPORT_SYMBOL_GPL(i2c_generic_scl_recovery);
@@ -1338,13 +1340,17 @@ static int of_dev_node_match(struct device *dev, void *data)
1338struct i2c_client *of_find_i2c_device_by_node(struct device_node *node) 1340struct i2c_client *of_find_i2c_device_by_node(struct device_node *node)
1339{ 1341{
1340 struct device *dev; 1342 struct device *dev;
1343 struct i2c_client *client;
1341 1344
1342 dev = bus_find_device(&i2c_bus_type, NULL, node, 1345 dev = bus_find_device(&i2c_bus_type, NULL, node, of_dev_node_match);
1343 of_dev_node_match);
1344 if (!dev) 1346 if (!dev)
1345 return NULL; 1347 return NULL;
1346 1348
1347 return i2c_verify_client(dev); 1349 client = i2c_verify_client(dev);
1350 if (!client)
1351 put_device(dev);
1352
1353 return client;
1348} 1354}
1349EXPORT_SYMBOL(of_find_i2c_device_by_node); 1355EXPORT_SYMBOL(of_find_i2c_device_by_node);
1350 1356
@@ -1352,13 +1358,17 @@ EXPORT_SYMBOL(of_find_i2c_device_by_node);
1352struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node) 1358struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node)
1353{ 1359{
1354 struct device *dev; 1360 struct device *dev;
1361 struct i2c_adapter *adapter;
1355 1362
1356 dev = bus_find_device(&i2c_bus_type, NULL, node, 1363 dev = bus_find_device(&i2c_bus_type, NULL, node, of_dev_node_match);
1357 of_dev_node_match);
1358 if (!dev) 1364 if (!dev)
1359 return NULL; 1365 return NULL;
1360 1366
1361 return i2c_verify_adapter(dev); 1367 adapter = i2c_verify_adapter(dev);
1368 if (!adapter)
1369 put_device(dev);
1370
1371 return adapter;
1362} 1372}
1363EXPORT_SYMBOL(of_find_i2c_adapter_by_node); 1373EXPORT_SYMBOL(of_find_i2c_adapter_by_node);
1364#else 1374#else
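
The of_find_*_by_node() fixes above drop the device reference taken by bus_find_device() whenever the found device turns out not to be the expected type. A plain C sketch of that rule with a toy refcount (not the driver-core API):

#include <stdio.h>
#include <stdlib.h>

struct device {
	int refcount;
	int is_client;	/* stand-in for "this device really is an i2c_client" */
};

static struct device *get_device(struct device *dev) { dev->refcount++; return dev; }

static void put_device(struct device *dev)
{
	if (--dev->refcount == 0)
		free(dev);
}

/* Stand-in for bus_find_device(): a successful lookup returns a reference. */
static struct device *bus_find_device(struct device *dev)
{
	return dev ? get_device(dev) : NULL;
}

static struct device *find_client(struct device *dev)
{
	struct device *found = bus_find_device(dev);

	if (!found)
		return NULL;
	if (!found->is_client) {
		put_device(found);	/* wrong type: drop the lookup reference */
		return NULL;
	}
	return found;			/* caller now owns one reference */
}

int main(void)
{
	struct device *adapter = calloc(1, sizeof(*adapter));

	adapter->refcount = 1;
	printf("lookup result: %p\n", (void *)find_client(adapter));
	printf("refcount after failed lookup: %d\n", adapter->refcount);
	put_device(adapter);
	return 0;
}
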
diff --git a/drivers/i2c/i2c-slave-eeprom.c b/drivers/i2c/i2c-slave-eeprom.c
index 822374654609..1da449614779 100644
--- a/drivers/i2c/i2c-slave-eeprom.c
+++ b/drivers/i2c/i2c-slave-eeprom.c
@@ -80,9 +80,6 @@ static ssize_t i2c_slave_eeprom_bin_read(struct file *filp, struct kobject *kobj
80 struct eeprom_data *eeprom; 80 struct eeprom_data *eeprom;
81 unsigned long flags; 81 unsigned long flags;
82 82
83 if (off + count > attr->size)
84 return -EFBIG;
85
86 eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj)); 83 eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj));
87 84
88 spin_lock_irqsave(&eeprom->buffer_lock, flags); 85 spin_lock_irqsave(&eeprom->buffer_lock, flags);
@@ -98,9 +95,6 @@ static ssize_t i2c_slave_eeprom_bin_write(struct file *filp, struct kobject *kob
98 struct eeprom_data *eeprom; 95 struct eeprom_data *eeprom;
99 unsigned long flags; 96 unsigned long flags;
100 97
101 if (off + count > attr->size)
102 return -EFBIG;
103
104 eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj)); 98 eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj));
105 99
106 spin_lock_irqsave(&eeprom->buffer_lock, flags); 100 spin_lock_irqsave(&eeprom->buffer_lock, flags);
diff --git a/drivers/iio/accel/bmc150-accel.c b/drivers/iio/accel/bmc150-accel.c
index 4e70f51c2370..cc5a35750b50 100644
--- a/drivers/iio/accel/bmc150-accel.c
+++ b/drivers/iio/accel/bmc150-accel.c
@@ -1464,7 +1464,7 @@ static void bmc150_accel_unregister_triggers(struct bmc150_accel_data *data,
1464{ 1464{
1465 int i; 1465 int i;
1466 1466
1467 for (i = from; i >= 0; i++) { 1467 for (i = from; i >= 0; i--) {
1468 if (data->triggers[i].indio_trig) { 1468 if (data->triggers[i].indio_trig) {
1469 iio_trigger_unregister(data->triggers[i].indio_trig); 1469 iio_trigger_unregister(data->triggers[i].indio_trig);
1470 data->triggers[i].indio_trig = NULL; 1470 data->triggers[i].indio_trig = NULL;
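
The bmc150 fix above turns the unwind loop around so it actually walks back towards index zero. A tiny sketch of the corrected direction (stand-in data, not IIO code):

#include <stdio.h>

#define NTRIGGERS 4

static void unregister_triggers(int registered[NTRIGGERS], int from)
{
	for (int i = from; i >= 0; i--) {	/* "i++" here was the bug */
		if (registered[i]) {
			printf("unregistering trigger %d\n", i);
			registered[i] = 0;
		}
	}
}

int main(void)
{
	/* Trigger 3 failed to register, so unwind starts from index 2. */
	int registered[NTRIGGERS] = { 1, 1, 1, 0 };

	unregister_triggers(registered, 2);
	return 0;
}
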
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
index e8e2077c7244..13ea1ea23328 100644
--- a/drivers/iio/accel/mma8452.c
+++ b/drivers/iio/accel/mma8452.c
@@ -557,21 +557,21 @@ static void mma8452_transient_interrupt(struct iio_dev *indio_dev)
557 if (src & MMA8452_TRANSIENT_SRC_XTRANSE) 557 if (src & MMA8452_TRANSIENT_SRC_XTRANSE)
558 iio_push_event(indio_dev, 558 iio_push_event(indio_dev,
559 IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_X, 559 IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_X,
560 IIO_EV_TYPE_THRESH, 560 IIO_EV_TYPE_MAG,
561 IIO_EV_DIR_RISING), 561 IIO_EV_DIR_RISING),
562 ts); 562 ts);
563 563
564 if (src & MMA8452_TRANSIENT_SRC_YTRANSE) 564 if (src & MMA8452_TRANSIENT_SRC_YTRANSE)
565 iio_push_event(indio_dev, 565 iio_push_event(indio_dev,
566 IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_Y, 566 IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_Y,
567 IIO_EV_TYPE_THRESH, 567 IIO_EV_TYPE_MAG,
568 IIO_EV_DIR_RISING), 568 IIO_EV_DIR_RISING),
569 ts); 569 ts);
570 570
571 if (src & MMA8452_TRANSIENT_SRC_ZTRANSE) 571 if (src & MMA8452_TRANSIENT_SRC_ZTRANSE)
572 iio_push_event(indio_dev, 572 iio_push_event(indio_dev,
573 IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_Z, 573 IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_Z,
574 IIO_EV_TYPE_THRESH, 574 IIO_EV_TYPE_MAG,
575 IIO_EV_DIR_RISING), 575 IIO_EV_DIR_RISING),
576 ts); 576 ts);
577} 577}
@@ -644,7 +644,7 @@ static int mma8452_reg_access_dbg(struct iio_dev *indio_dev,
644 644
645static const struct iio_event_spec mma8452_transient_event[] = { 645static const struct iio_event_spec mma8452_transient_event[] = {
646 { 646 {
647 .type = IIO_EV_TYPE_THRESH, 647 .type = IIO_EV_TYPE_MAG,
648 .dir = IIO_EV_DIR_RISING, 648 .dir = IIO_EV_DIR_RISING,
649 .mask_separate = BIT(IIO_EV_INFO_ENABLE), 649 .mask_separate = BIT(IIO_EV_INFO_ENABLE),
650 .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE) | 650 .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE) |
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 7c5565891cb8..eb0cd897714a 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -153,8 +153,7 @@ config DA9150_GPADC
153 153
154config CC10001_ADC 154config CC10001_ADC
155 tristate "Cosmic Circuits 10001 ADC driver" 155 tristate "Cosmic Circuits 10001 ADC driver"
156 depends on HAVE_CLK || REGULATOR 156 depends on HAS_IOMEM && HAVE_CLK && REGULATOR
157 depends on HAS_IOMEM
158 select IIO_BUFFER 157 select IIO_BUFFER
159 select IIO_TRIGGERED_BUFFER 158 select IIO_TRIGGERED_BUFFER
160 help 159 help
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
index 8a0eb4a04fb5..7b40925dd4ff 100644
--- a/drivers/iio/adc/at91_adc.c
+++ b/drivers/iio/adc/at91_adc.c
@@ -182,7 +182,7 @@ struct at91_adc_caps {
182 u8 ts_pen_detect_sensitivity; 182 u8 ts_pen_detect_sensitivity;
183 183
184 /* startup time calculate function */ 184 /* startup time calculate function */
185 u32 (*calc_startup_ticks)(u8 startup_time, u32 adc_clk_khz); 185 u32 (*calc_startup_ticks)(u32 startup_time, u32 adc_clk_khz);
186 186
187 u8 num_channels; 187 u8 num_channels;
188 struct at91_adc_reg_desc registers; 188 struct at91_adc_reg_desc registers;
@@ -201,7 +201,7 @@ struct at91_adc_state {
201 u8 num_channels; 201 u8 num_channels;
202 void __iomem *reg_base; 202 void __iomem *reg_base;
203 struct at91_adc_reg_desc *registers; 203 struct at91_adc_reg_desc *registers;
204 u8 startup_time; 204 u32 startup_time;
205 u8 sample_hold_time; 205 u8 sample_hold_time;
206 bool sleep_mode; 206 bool sleep_mode;
207 struct iio_trigger **trig; 207 struct iio_trigger **trig;
@@ -779,7 +779,7 @@ ret:
779 return ret; 779 return ret;
780} 780}
781 781
782static u32 calc_startup_ticks_9260(u8 startup_time, u32 adc_clk_khz) 782static u32 calc_startup_ticks_9260(u32 startup_time, u32 adc_clk_khz)
783{ 783{
784 /* 784 /*
785 * Number of ticks needed to cover the startup time of the ADC 785 * Number of ticks needed to cover the startup time of the ADC
@@ -790,7 +790,7 @@ static u32 calc_startup_ticks_9260(u8 startup_time, u32 adc_clk_khz)
790 return round_up((startup_time * adc_clk_khz / 1000) - 1, 8) / 8; 790 return round_up((startup_time * adc_clk_khz / 1000) - 1, 8) / 8;
791} 791}
792 792
793static u32 calc_startup_ticks_9x5(u8 startup_time, u32 adc_clk_khz) 793static u32 calc_startup_ticks_9x5(u32 startup_time, u32 adc_clk_khz)
794{ 794{
795 /* 795 /*
796 * For sama5d3x and at91sam9x5, the formula changes to: 796 * For sama5d3x and at91sam9x5, the formula changes to:
diff --git a/drivers/iio/adc/mcp320x.c b/drivers/iio/adc/mcp320x.c
index 8d9c9b9215dd..d819823f7257 100644
--- a/drivers/iio/adc/mcp320x.c
+++ b/drivers/iio/adc/mcp320x.c
@@ -299,6 +299,8 @@ static int mcp320x_probe(struct spi_device *spi)
299 indio_dev->channels = chip_info->channels; 299 indio_dev->channels = chip_info->channels;
300 indio_dev->num_channels = chip_info->num_channels; 300 indio_dev->num_channels = chip_info->num_channels;
301 301
302 adc->chip_info = chip_info;
303
302 adc->transfer[0].tx_buf = &adc->tx_buf; 304 adc->transfer[0].tx_buf = &adc->tx_buf;
303 adc->transfer[0].len = sizeof(adc->tx_buf); 305 adc->transfer[0].len = sizeof(adc->tx_buf);
304 adc->transfer[1].rx_buf = adc->rx_buf; 306 adc->transfer[1].rx_buf = adc->rx_buf;
diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c
index 8d4e019ea4ca..9c311c1e1ac7 100644
--- a/drivers/iio/adc/rockchip_saradc.c
+++ b/drivers/iio/adc/rockchip_saradc.c
@@ -349,3 +349,7 @@ static struct platform_driver rockchip_saradc_driver = {
349}; 349};
350 350
351module_platform_driver(rockchip_saradc_driver); 351module_platform_driver(rockchip_saradc_driver);
352
353MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
354MODULE_DESCRIPTION("Rockchip SARADC driver");
355MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/twl4030-madc.c b/drivers/iio/adc/twl4030-madc.c
index 06f4792240f0..ebe415f10640 100644
--- a/drivers/iio/adc/twl4030-madc.c
+++ b/drivers/iio/adc/twl4030-madc.c
@@ -833,7 +833,8 @@ static int twl4030_madc_probe(struct platform_device *pdev)
833 irq = platform_get_irq(pdev, 0); 833 irq = platform_get_irq(pdev, 0);
834 ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, 834 ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
835 twl4030_madc_threaded_irq_handler, 835 twl4030_madc_threaded_irq_handler,
836 IRQF_TRIGGER_RISING, "twl4030_madc", madc); 836 IRQF_TRIGGER_RISING | IRQF_ONESHOT,
837 "twl4030_madc", madc);
837 if (ret) { 838 if (ret) {
838 dev_err(&pdev->dev, "could not request irq\n"); 839 dev_err(&pdev->dev, "could not request irq\n");
839 goto err_i2c; 840 goto err_i2c;
diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c
index 480f335a0f9f..819632bf1fda 100644
--- a/drivers/iio/adc/vf610_adc.c
+++ b/drivers/iio/adc/vf610_adc.c
@@ -635,7 +635,7 @@ static int vf610_adc_reg_access(struct iio_dev *indio_dev,
635 struct vf610_adc *info = iio_priv(indio_dev); 635 struct vf610_adc *info = iio_priv(indio_dev);
636 636
637 if ((readval == NULL) || 637 if ((readval == NULL) ||
638 (!(reg % 4) || (reg > VF610_REG_ADC_PCTL))) 638 ((reg % 4) || (reg > VF610_REG_ADC_PCTL)))
639 return -EINVAL; 639 return -EINVAL;
640 640
641 *readval = readl(info->regs + reg); 641 *readval = readl(info->regs + reg);
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
index 610fc98f88ef..595511022795 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
@@ -36,6 +36,8 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
36 s32 poll_value = 0; 36 s32 poll_value = 0;
37 37
38 if (state) { 38 if (state) {
39 if (!atomic_read(&st->user_requested_state))
40 return 0;
39 if (sensor_hub_device_open(st->hsdev)) 41 if (sensor_hub_device_open(st->hsdev))
40 return -EIO; 42 return -EIO;
41 43
@@ -52,8 +54,12 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
52 54
53 poll_value = hid_sensor_read_poll_value(st); 55 poll_value = hid_sensor_read_poll_value(st);
54 } else { 56 } else {
55 if (!atomic_dec_and_test(&st->data_ready)) 57 int val;
58
59 val = atomic_dec_if_positive(&st->data_ready);
60 if (val < 0)
56 return 0; 61 return 0;
62
57 sensor_hub_device_close(st->hsdev); 63 sensor_hub_device_close(st->hsdev);
58 state_val = hid_sensor_get_usage_index(st->hsdev, 64 state_val = hid_sensor_get_usage_index(st->hsdev,
59 st->power_state.report_id, 65 st->power_state.report_id,
@@ -92,9 +98,11 @@ EXPORT_SYMBOL(hid_sensor_power_state);
92 98
93int hid_sensor_power_state(struct hid_sensor_common *st, bool state) 99int hid_sensor_power_state(struct hid_sensor_common *st, bool state)
94{ 100{
101
95#ifdef CONFIG_PM 102#ifdef CONFIG_PM
96 int ret; 103 int ret;
97 104
105 atomic_set(&st->user_requested_state, state);
98 if (state) 106 if (state)
99 ret = pm_runtime_get_sync(&st->pdev->dev); 107 ret = pm_runtime_get_sync(&st->pdev->dev);
100 else { 108 else {
@@ -109,6 +117,7 @@ int hid_sensor_power_state(struct hid_sensor_common *st, bool state)
109 117
110 return 0; 118 return 0;
111#else 119#else
120 atomic_set(&st->user_requested_state, state);
112 return _hid_sensor_power_state(st, state); 121 return _hid_sensor_power_state(st, state);
113#endif 122#endif
114} 123}
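
The hid-sensor-trigger change swaps atomic_dec_and_test() for atomic_dec_if_positive() so the open count cannot be driven below zero when power-off is requested more often than power-on, and records the user-requested state so runtime PM does not power up a sensor nobody asked for. A rough userspace sketch of the decrement-only-if-positive idea using C11 atomics (an approximation of the kernel primitive, not the primitive itself):

    #include <stdatomic.h>
    #include <stdio.h>

    /* Mirrors atomic_dec_if_positive(): returns the new value, or a negative
     * number without touching the counter when it is already zero. */
    static int dec_if_positive(atomic_int *v)
    {
        int old = atomic_load(v);

        while (old > 0) {
            if (atomic_compare_exchange_weak(v, &old, old - 1))
                return old - 1;     /* decremented: return new value   */
        }
        return -1;                  /* already zero: leave it untouched */
    }

    int main(void)
    {
        atomic_int users = 1;

        printf("%d\n", dec_if_positive(&users));  /* 0: last user gone       */
        printf("%d\n", dec_if_positive(&users));  /* -1: no underflow below 0 */
        return 0;
    }
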
diff --git a/drivers/iio/dac/ad5624r_spi.c b/drivers/iio/dac/ad5624r_spi.c
index 61bb9d4239ea..e98428df0d44 100644
--- a/drivers/iio/dac/ad5624r_spi.c
+++ b/drivers/iio/dac/ad5624r_spi.c
@@ -22,7 +22,7 @@
22#include "ad5624r.h" 22#include "ad5624r.h"
23 23
24static int ad5624r_spi_write(struct spi_device *spi, 24static int ad5624r_spi_write(struct spi_device *spi,
25 u8 cmd, u8 addr, u16 val, u8 len) 25 u8 cmd, u8 addr, u16 val, u8 shift)
26{ 26{
27 u32 data; 27 u32 data;
28 u8 msg[3]; 28 u8 msg[3];
@@ -35,7 +35,7 @@ static int ad5624r_spi_write(struct spi_device *spi,
35 * 14-, 12-bit input code followed by 0, 2, or 4 don't care bits, 35 * 14-, 12-bit input code followed by 0, 2, or 4 don't care bits,
36 * for the AD5664R, AD5644R, and AD5624R, respectively. 36 * for the AD5664R, AD5644R, and AD5624R, respectively.
37 */ 37 */
38 data = (0 << 22) | (cmd << 19) | (addr << 16) | (val << (16 - len)); 38 data = (0 << 22) | (cmd << 19) | (addr << 16) | (val << shift);
39 msg[0] = data >> 16; 39 msg[0] = data >> 16;
40 msg[1] = data >> 8; 40 msg[1] = data >> 8;
41 msg[2] = data; 41 msg[2] = data;
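
The ad5624r change makes callers pass the data-word shift directly (0, 2 or 4 don't-care bits for the 16-, 14- and 12-bit parts) instead of a bit length, and builds the 24-bit frame from it. A small userspace sketch of that packing; names and the example values are illustrative:

    #include <stdint.h>
    #include <stdio.h>

    static void pack_frame(uint8_t cmd, uint8_t addr, uint16_t val,
                           unsigned int shift, uint8_t msg[3])
    {
        uint32_t data = ((uint32_t)cmd << 19) | ((uint32_t)addr << 16) |
                        ((uint32_t)val << shift);

        msg[0] = data >> 16;    /* command/address byte          */
        msg[1] = data >> 8;     /* high part of the data word    */
        msg[2] = data;          /* low part plus don't-care bits */
    }

    int main(void)
    {
        uint8_t msg[3];

        pack_frame(0x3, 0x0, 0x0fff, 4, msg);   /* 12-bit code, 4 pad bits */
        printf("%02x %02x %02x\n", msg[0], msg[1], msg[2]);  /* 18 ff f0 */
        return 0;
    }
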
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index 17d4bb15be4d..65ce86837177 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -431,6 +431,23 @@ static int inv_mpu6050_write_gyro_scale(struct inv_mpu6050_state *st, int val)
431 return -EINVAL; 431 return -EINVAL;
432} 432}
433 433
434static int inv_write_raw_get_fmt(struct iio_dev *indio_dev,
435 struct iio_chan_spec const *chan, long mask)
436{
437 switch (mask) {
438 case IIO_CHAN_INFO_SCALE:
439 switch (chan->type) {
440 case IIO_ANGL_VEL:
441 return IIO_VAL_INT_PLUS_NANO;
442 default:
443 return IIO_VAL_INT_PLUS_MICRO;
444 }
445 default:
446 return IIO_VAL_INT_PLUS_MICRO;
447 }
448
449 return -EINVAL;
450}
434static int inv_mpu6050_write_accel_scale(struct inv_mpu6050_state *st, int val) 451static int inv_mpu6050_write_accel_scale(struct inv_mpu6050_state *st, int val)
435{ 452{
436 int result, i; 453 int result, i;
@@ -696,6 +713,7 @@ static const struct iio_info mpu_info = {
696 .driver_module = THIS_MODULE, 713 .driver_module = THIS_MODULE,
697 .read_raw = &inv_mpu6050_read_raw, 714 .read_raw = &inv_mpu6050_read_raw,
698 .write_raw = &inv_mpu6050_write_raw, 715 .write_raw = &inv_mpu6050_write_raw,
716 .write_raw_get_fmt = &inv_write_raw_get_fmt,
699 .attrs = &inv_attribute_group, 717 .attrs = &inv_attribute_group,
700 .validate_trigger = inv_mpu6050_validate_trigger, 718 .validate_trigger = inv_mpu6050_validate_trigger,
701}; 719};
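
The inv_mpu6050 hunk registers a write_raw_get_fmt callback so scale values written for the gyro channel are parsed with nano precision while everything else keeps the default micro precision. A toy dispatch in plain C, with enums standing in for the IIO constants:

    #include <stdio.h>

    enum chan_type { CHAN_ACCEL, CHAN_ANGL_VEL };
    enum val_fmt   { VAL_INT_PLUS_MICRO, VAL_INT_PLUS_NANO };

    static enum val_fmt write_fmt_for(enum chan_type type)
    {
        switch (type) {
        case CHAN_ANGL_VEL:
            return VAL_INT_PLUS_NANO;   /* gyro scale needs nano steps */
        default:
            return VAL_INT_PLUS_MICRO;  /* everything else: micro      */
        }
    }

    int main(void)
    {
        printf("gyro fmt:  %d\n", write_fmt_for(CHAN_ANGL_VEL));
        printf("accel fmt: %d\n", write_fmt_for(CHAN_ACCEL));
        return 0;
    }
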
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index e6198b7c9cbf..a5c59251ec0e 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -188,6 +188,7 @@ config SENSORS_LM3533
188config LTR501 188config LTR501
189 tristate "LTR-501ALS-01 light sensor" 189 tristate "LTR-501ALS-01 light sensor"
190 depends on I2C 190 depends on I2C
191 select REGMAP_I2C
191 select IIO_BUFFER 192 select IIO_BUFFER
192 select IIO_TRIGGERED_BUFFER 193 select IIO_TRIGGERED_BUFFER
193 help 194 help
@@ -201,6 +202,7 @@ config LTR501
201config STK3310 202config STK3310
202 tristate "STK3310 ALS and proximity sensor" 203 tristate "STK3310 ALS and proximity sensor"
203 depends on I2C 204 depends on I2C
205 select REGMAP_I2C
204 help 206 help
205 Say yes here to get support for the Sensortek STK3310 ambient light 207 Say yes here to get support for the Sensortek STK3310 ambient light
206 and proximity sensor. The STK3311 model is also supported by this 208 and proximity sensor. The STK3311 model is also supported by this
diff --git a/drivers/iio/light/cm3323.c b/drivers/iio/light/cm3323.c
index 869033e48a1f..a1d4905cc9d2 100644
--- a/drivers/iio/light/cm3323.c
+++ b/drivers/iio/light/cm3323.c
@@ -123,7 +123,7 @@ static int cm3323_set_it_bits(struct cm3323_data *data, int val, int val2)
123 for (i = 0; i < ARRAY_SIZE(cm3323_int_time); i++) { 123 for (i = 0; i < ARRAY_SIZE(cm3323_int_time); i++) {
124 if (val == cm3323_int_time[i].val && 124 if (val == cm3323_int_time[i].val &&
125 val2 == cm3323_int_time[i].val2) { 125 val2 == cm3323_int_time[i].val2) {
126 reg_conf = data->reg_conf; 126 reg_conf = data->reg_conf & ~CM3323_CONF_IT_MASK;
127 reg_conf |= i << CM3323_CONF_IT_SHIFT; 127 reg_conf |= i << CM3323_CONF_IT_SHIFT;
128 128
129 ret = i2c_smbus_write_word_data(data->client, 129 ret = i2c_smbus_write_word_data(data->client,
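
The cm3323 fix turns the integration-time update into a proper read-modify-write: the old field bits are masked out before the new index is OR-ed in, so stale bits can no longer survive a change. Sketch of the pattern with hypothetical mask and shift values:

    #include <stdint.h>
    #include <stdio.h>

    #define CONF_IT_SHIFT   4                       /* illustrative values  */
    #define CONF_IT_MASK    (0x7 << CONF_IT_SHIFT)

    static uint16_t set_it_field(uint16_t conf, unsigned int index)
    {
        conf &= ~CONF_IT_MASK;                      /* drop the old field   */
        conf |= (index << CONF_IT_SHIFT) & CONF_IT_MASK;
        return conf;
    }

    int main(void)
    {
        uint16_t conf = 0x0031;                     /* old index 3, other bits set */

        printf("0x%04x\n", set_it_field(conf, 1));  /* 0x0011: only IT changed */
        return 0;
    }
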
diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c
index 1ef7d3773ab9..b5a0e66b5f28 100644
--- a/drivers/iio/light/ltr501.c
+++ b/drivers/iio/light/ltr501.c
@@ -1302,7 +1302,7 @@ static int ltr501_init(struct ltr501_data *data)
1302 if (ret < 0) 1302 if (ret < 0)
1303 return ret; 1303 return ret;
1304 1304
1305 data->als_contr = ret | data->chip_info->als_mode_active; 1305 data->als_contr = status | data->chip_info->als_mode_active;
1306 1306
1307 ret = regmap_read(data->regmap, LTR501_PS_CONTR, &status); 1307 ret = regmap_read(data->regmap, LTR501_PS_CONTR, &status);
1308 if (ret < 0) 1308 if (ret < 0)
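
The ltr501 fix feeds als_contr from the value returned through regmap_read()'s out-parameter (status) rather than the function's return code (ret). A tiny sketch of that bug class, with read_reg() as a made-up stand-in for a regmap-style helper:

    #include <stdio.h>

    static int read_reg(unsigned int reg, unsigned int *val)
    {
        (void)reg;
        *val = 0x0b;            /* pretend the hardware returned 0x0b */
        return 0;               /* 0 means success, not the data      */
    }

    int main(void)
    {
        unsigned int status;
        int ret = read_reg(0x80, &status);

        if (ret < 0)
            return 1;

        unsigned int contr = status | 0x01;     /* use status, never ret */
        printf("contr = 0x%02x\n", contr);
        return 0;
    }
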
diff --git a/drivers/iio/light/stk3310.c b/drivers/iio/light/stk3310.c
index fee4297d7c8f..11a027adc204 100644
--- a/drivers/iio/light/stk3310.c
+++ b/drivers/iio/light/stk3310.c
@@ -43,7 +43,6 @@
43#define STK3311_CHIP_ID_VAL 0x1D 43#define STK3311_CHIP_ID_VAL 0x1D
44#define STK3310_PSINT_EN 0x01 44#define STK3310_PSINT_EN 0x01
45#define STK3310_PS_MAX_VAL 0xFFFF 45#define STK3310_PS_MAX_VAL 0xFFFF
46#define STK3310_THRESH_MAX 0xFFFF
47 46
48#define STK3310_DRIVER_NAME "stk3310" 47#define STK3310_DRIVER_NAME "stk3310"
49#define STK3310_REGMAP_NAME "stk3310_regmap" 48#define STK3310_REGMAP_NAME "stk3310_regmap"
@@ -84,15 +83,13 @@ static const struct reg_field stk3310_reg_field_flag_psint =
84 REG_FIELD(STK3310_REG_FLAG, 4, 4); 83 REG_FIELD(STK3310_REG_FLAG, 4, 4);
85static const struct reg_field stk3310_reg_field_flag_nf = 84static const struct reg_field stk3310_reg_field_flag_nf =
86 REG_FIELD(STK3310_REG_FLAG, 0, 0); 85 REG_FIELD(STK3310_REG_FLAG, 0, 0);
87/* 86
88 * Maximum PS values with regard to scale. Used to export the 'inverse' 87/* Estimate maximum proximity values with regard to measurement scale. */
89 * PS value (high values for far objects, low values for near objects).
90 */
91static const int stk3310_ps_max[4] = { 88static const int stk3310_ps_max[4] = {
92 STK3310_PS_MAX_VAL / 64, 89 STK3310_PS_MAX_VAL / 640,
93 STK3310_PS_MAX_VAL / 16, 90 STK3310_PS_MAX_VAL / 160,
94 STK3310_PS_MAX_VAL / 4, 91 STK3310_PS_MAX_VAL / 40,
95 STK3310_PS_MAX_VAL, 92 STK3310_PS_MAX_VAL / 10
96}; 93};
97 94
98static const int stk3310_scale_table[][2] = { 95static const int stk3310_scale_table[][2] = {
@@ -128,14 +125,14 @@ static const struct iio_event_spec stk3310_events[] = {
128 /* Proximity event */ 125 /* Proximity event */
129 { 126 {
130 .type = IIO_EV_TYPE_THRESH, 127 .type = IIO_EV_TYPE_THRESH,
131 .dir = IIO_EV_DIR_FALLING, 128 .dir = IIO_EV_DIR_RISING,
132 .mask_separate = BIT(IIO_EV_INFO_VALUE) | 129 .mask_separate = BIT(IIO_EV_INFO_VALUE) |
133 BIT(IIO_EV_INFO_ENABLE), 130 BIT(IIO_EV_INFO_ENABLE),
134 }, 131 },
135 /* Out-of-proximity event */ 132 /* Out-of-proximity event */
136 { 133 {
137 .type = IIO_EV_TYPE_THRESH, 134 .type = IIO_EV_TYPE_THRESH,
138 .dir = IIO_EV_DIR_RISING, 135 .dir = IIO_EV_DIR_FALLING,
139 .mask_separate = BIT(IIO_EV_INFO_VALUE) | 136 .mask_separate = BIT(IIO_EV_INFO_VALUE) |
140 BIT(IIO_EV_INFO_ENABLE), 137 BIT(IIO_EV_INFO_ENABLE),
141 }, 138 },
@@ -203,25 +200,18 @@ static int stk3310_read_event(struct iio_dev *indio_dev,
203 int *val, int *val2) 200 int *val, int *val2)
204{ 201{
205 u8 reg; 202 u8 reg;
206 u16 buf; 203 __be16 buf;
207 int ret; 204 int ret;
208 unsigned int index;
209 struct stk3310_data *data = iio_priv(indio_dev); 205 struct stk3310_data *data = iio_priv(indio_dev);
210 206
211 if (info != IIO_EV_INFO_VALUE) 207 if (info != IIO_EV_INFO_VALUE)
212 return -EINVAL; 208 return -EINVAL;
213 209
214 /* 210 /* Only proximity interrupts are implemented at the moment. */
215 * Only proximity interrupts are implemented at the moment.
216 * Since we're inverting proximity values, the sensor's 'high'
217 * threshold will become our 'low' threshold, associated with
218 * 'near' events. Similarly, the sensor's 'low' threshold will
219 * be our 'high' threshold, associated with 'far' events.
220 */
221 if (dir == IIO_EV_DIR_RISING) 211 if (dir == IIO_EV_DIR_RISING)
222 reg = STK3310_REG_THDL_PS;
223 else if (dir == IIO_EV_DIR_FALLING)
224 reg = STK3310_REG_THDH_PS; 212 reg = STK3310_REG_THDH_PS;
213 else if (dir == IIO_EV_DIR_FALLING)
214 reg = STK3310_REG_THDL_PS;
225 else 215 else
226 return -EINVAL; 216 return -EINVAL;
227 217
@@ -232,8 +222,7 @@ static int stk3310_read_event(struct iio_dev *indio_dev,
232 dev_err(&data->client->dev, "register read failed\n"); 222 dev_err(&data->client->dev, "register read failed\n");
233 return ret; 223 return ret;
234 } 224 }
235 regmap_field_read(data->reg_ps_gain, &index); 225 *val = be16_to_cpu(buf);
236 *val = swab16(stk3310_ps_max[index] - buf);
237 226
238 return IIO_VAL_INT; 227 return IIO_VAL_INT;
239} 228}
@@ -246,7 +235,7 @@ static int stk3310_write_event(struct iio_dev *indio_dev,
246 int val, int val2) 235 int val, int val2)
247{ 236{
248 u8 reg; 237 u8 reg;
249 u16 buf; 238 __be16 buf;
250 int ret; 239 int ret;
251 unsigned int index; 240 unsigned int index;
252 struct stk3310_data *data = iio_priv(indio_dev); 241 struct stk3310_data *data = iio_priv(indio_dev);
@@ -257,13 +246,13 @@ static int stk3310_write_event(struct iio_dev *indio_dev,
257 return -EINVAL; 246 return -EINVAL;
258 247
259 if (dir == IIO_EV_DIR_RISING) 248 if (dir == IIO_EV_DIR_RISING)
260 reg = STK3310_REG_THDL_PS;
261 else if (dir == IIO_EV_DIR_FALLING)
262 reg = STK3310_REG_THDH_PS; 249 reg = STK3310_REG_THDH_PS;
250 else if (dir == IIO_EV_DIR_FALLING)
251 reg = STK3310_REG_THDL_PS;
263 else 252 else
264 return -EINVAL; 253 return -EINVAL;
265 254
266 buf = swab16(stk3310_ps_max[index] - val); 255 buf = cpu_to_be16(val);
267 ret = regmap_bulk_write(data->regmap, reg, &buf, 2); 256 ret = regmap_bulk_write(data->regmap, reg, &buf, 2);
268 if (ret < 0) 257 if (ret < 0)
269 dev_err(&client->dev, "failed to set PS threshold!\n"); 258 dev_err(&client->dev, "failed to set PS threshold!\n");
@@ -312,7 +301,7 @@ static int stk3310_read_raw(struct iio_dev *indio_dev,
312 int *val, int *val2, long mask) 301 int *val, int *val2, long mask)
313{ 302{
314 u8 reg; 303 u8 reg;
315 u16 buf; 304 __be16 buf;
316 int ret; 305 int ret;
317 unsigned int index; 306 unsigned int index;
318 struct stk3310_data *data = iio_priv(indio_dev); 307 struct stk3310_data *data = iio_priv(indio_dev);
@@ -333,15 +322,7 @@ static int stk3310_read_raw(struct iio_dev *indio_dev,
333 mutex_unlock(&data->lock); 322 mutex_unlock(&data->lock);
334 return ret; 323 return ret;
335 } 324 }
336 *val = swab16(buf); 325 *val = be16_to_cpu(buf);
337 if (chan->type == IIO_PROXIMITY) {
338 /*
339 * Invert the proximity data so we return low values
340 * for close objects and high values for far ones.
341 */
342 regmap_field_read(data->reg_ps_gain, &index);
343 *val = stk3310_ps_max[index] - *val;
344 }
345 mutex_unlock(&data->lock); 326 mutex_unlock(&data->lock);
346 return IIO_VAL_INT; 327 return IIO_VAL_INT;
347 case IIO_CHAN_INFO_INT_TIME: 328 case IIO_CHAN_INFO_INT_TIME:
@@ -581,8 +562,8 @@ static irqreturn_t stk3310_irq_event_handler(int irq, void *private)
581 } 562 }
582 event = IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, 1, 563 event = IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, 1,
583 IIO_EV_TYPE_THRESH, 564 IIO_EV_TYPE_THRESH,
584 (dir ? IIO_EV_DIR_RISING : 565 (dir ? IIO_EV_DIR_FALLING :
585 IIO_EV_DIR_FALLING)); 566 IIO_EV_DIR_RISING));
586 iio_push_event(indio_dev, event, data->timestamp); 567 iio_push_event(indio_dev, event, data->timestamp);
587 568
588 /* Reset the interrupt flag */ 569 /* Reset the interrupt flag */
@@ -627,13 +608,7 @@ static int stk3310_probe(struct i2c_client *client,
627 if (ret < 0) 608 if (ret < 0)
628 return ret; 609 return ret;
629 610
630 ret = iio_device_register(indio_dev); 611 if (client->irq < 0)
631 if (ret < 0) {
632 dev_err(&client->dev, "device_register failed\n");
633 stk3310_set_state(data, STK3310_STATE_STANDBY);
634 }
635
636 if (client->irq <= 0)
637 client->irq = stk3310_gpio_probe(client); 612 client->irq = stk3310_gpio_probe(client);
638 613
639 if (client->irq >= 0) { 614 if (client->irq >= 0) {
@@ -648,6 +623,12 @@ static int stk3310_probe(struct i2c_client *client,
648 client->irq); 623 client->irq);
649 } 624 }
650 625
626 ret = iio_device_register(indio_dev);
627 if (ret < 0) {
628 dev_err(&client->dev, "device_register failed\n");
629 stk3310_set_state(data, STK3310_STATE_STANDBY);
630 }
631
651 return ret; 632 return ret;
652} 633}
653 634
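
The stk3310 rework treats the 16-bit proximity values and thresholds as big-endian registers (__be16 with cpu_to_be16()/be16_to_cpu()) instead of unconditionally byte-swapping, and drops the value inversion so thresholds map directly onto rising/falling events. A userspace sketch of the explicit big-endian conversion; the helpers below are illustrative, not the driver's:

    #include <stdint.h>
    #include <stdio.h>

    static void put_be16(uint16_t val, uint8_t buf[2])
    {
        buf[0] = val >> 8;              /* most significant byte first */
        buf[1] = val & 0xff;
    }

    static uint16_t get_be16(const uint8_t buf[2])
    {
        return ((uint16_t)buf[0] << 8) | buf[1];
    }

    int main(void)
    {
        uint8_t buf[2];

        put_be16(0x1234, buf);          /* what a threshold write would carry */
        printf("%02x %02x -> %04x\n", buf[0], buf[1], get_be16(buf));
        return 0;
    }
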
diff --git a/drivers/iio/light/tcs3414.c b/drivers/iio/light/tcs3414.c
index 71c2bde275aa..f8b1df018abe 100644
--- a/drivers/iio/light/tcs3414.c
+++ b/drivers/iio/light/tcs3414.c
@@ -185,7 +185,7 @@ static int tcs3414_write_raw(struct iio_dev *indio_dev,
185 if (val != 0) 185 if (val != 0)
186 return -EINVAL; 186 return -EINVAL;
187 for (i = 0; i < ARRAY_SIZE(tcs3414_times); i++) { 187 for (i = 0; i < ARRAY_SIZE(tcs3414_times); i++) {
188 if (val == tcs3414_times[i] * 1000) { 188 if (val2 == tcs3414_times[i] * 1000) {
189 data->timing &= ~TCS3414_INTEG_MASK; 189 data->timing &= ~TCS3414_INTEG_MASK;
190 data->timing |= i; 190 data->timing |= i;
191 return i2c_smbus_write_byte_data( 191 return i2c_smbus_write_byte_data(
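
The tcs3414 fix matches the requested integration time against val2, the microseconds half of the IIO (val, val2) pair, rather than val, which the preceding check already forces to zero. A small sketch of the lookup; the table contents are illustrative, not necessarily the sensor's actual timings:

    #include <stdio.h>

    static const int times_ms[] = { 12, 100, 400 };   /* stand-in table */

    static int find_time_index(int val, int val2)
    {
        if (val != 0)
            return -1;
        for (unsigned int i = 0; i < sizeof(times_ms) / sizeof(times_ms[0]); i++)
            if (val2 == times_ms[i] * 1000)           /* ms -> us */
                return (int)i;
        return -1;
    }

    int main(void)
    {
        printf("%d\n", find_time_index(0, 100000));   /* 1: 100 ms     */
        printf("%d\n", find_time_index(0, 123));      /* -1: no match  */
        return 0;
    }
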
diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig
index dcadfc4f0661..efb9350b0d76 100644
--- a/drivers/iio/magnetometer/Kconfig
+++ b/drivers/iio/magnetometer/Kconfig
@@ -90,6 +90,7 @@ config IIO_ST_MAGN_SPI_3AXIS
90config BMC150_MAGN 90config BMC150_MAGN
91 tristate "Bosch BMC150 Magnetometer Driver" 91 tristate "Bosch BMC150 Magnetometer Driver"
92 depends on I2C 92 depends on I2C
93 select REGMAP_I2C
93 select IIO_BUFFER 94 select IIO_BUFFER
94 select IIO_TRIGGERED_BUFFER 95 select IIO_TRIGGERED_BUFFER
95 help 96 help
diff --git a/drivers/iio/magnetometer/bmc150_magn.c b/drivers/iio/magnetometer/bmc150_magn.c
index d4c178869991..1347a1f2e46f 100644
--- a/drivers/iio/magnetometer/bmc150_magn.c
+++ b/drivers/iio/magnetometer/bmc150_magn.c
@@ -706,11 +706,11 @@ static int bmc150_magn_init(struct bmc150_magn_data *data)
706 goto err_poweroff; 706 goto err_poweroff;
707 } 707 }
708 if (chip_id != BMC150_MAGN_CHIP_ID_VAL) { 708 if (chip_id != BMC150_MAGN_CHIP_ID_VAL) {
709 dev_err(&data->client->dev, "Invalid chip id 0x%x\n", ret); 709 dev_err(&data->client->dev, "Invalid chip id 0x%x\n", chip_id);
710 ret = -ENODEV; 710 ret = -ENODEV;
711 goto err_poweroff; 711 goto err_poweroff;
712 } 712 }
713 dev_dbg(&data->client->dev, "Chip id %x\n", ret); 713 dev_dbg(&data->client->dev, "Chip id %x\n", chip_id);
714 714
715 preset = bmc150_magn_presets_table[BMC150_MAGN_DEFAULT_PRESET]; 715 preset = bmc150_magn_presets_table[BMC150_MAGN_DEFAULT_PRESET];
716 ret = bmc150_magn_set_odr(data, preset.odr); 716 ret = bmc150_magn_set_odr(data, preset.odr);
diff --git a/drivers/iio/magnetometer/mmc35240.c b/drivers/iio/magnetometer/mmc35240.c
index 7a2ea71c659a..706ebfd6297f 100644
--- a/drivers/iio/magnetometer/mmc35240.c
+++ b/drivers/iio/magnetometer/mmc35240.c
@@ -84,10 +84,10 @@
84#define MMC35240_OTP_START_ADDR 0x1B 84#define MMC35240_OTP_START_ADDR 0x1B
85 85
86enum mmc35240_resolution { 86enum mmc35240_resolution {
87 MMC35240_16_BITS_SLOW = 0, /* 100 Hz */ 87 MMC35240_16_BITS_SLOW = 0, /* 7.92 ms */
88 MMC35240_16_BITS_FAST, /* 200 Hz */ 88 MMC35240_16_BITS_FAST, /* 4.08 ms */
89 MMC35240_14_BITS, /* 333 Hz */ 89 MMC35240_14_BITS, /* 2.16 ms */
90 MMC35240_12_BITS, /* 666 Hz */ 90 MMC35240_12_BITS, /* 1.20 ms */
91}; 91};
92 92
93enum mmc35240_axis { 93enum mmc35240_axis {
@@ -100,22 +100,22 @@ static const struct {
100 int sens[3]; /* sensitivity per X, Y, Z axis */ 100 int sens[3]; /* sensitivity per X, Y, Z axis */
101 int nfo; /* null field output */ 101 int nfo; /* null field output */
102} mmc35240_props_table[] = { 102} mmc35240_props_table[] = {
103 /* 16 bits, 100Hz ODR */ 103 /* 16 bits, 125Hz ODR */
104 { 104 {
105 {1024, 1024, 1024}, 105 {1024, 1024, 1024},
106 32768, 106 32768,
107 }, 107 },
108 /* 16 bits, 200Hz ODR */ 108 /* 16 bits, 250Hz ODR */
109 { 109 {
110 {1024, 1024, 770}, 110 {1024, 1024, 770},
111 32768, 111 32768,
112 }, 112 },
113 /* 14 bits, 333Hz ODR */ 113 /* 14 bits, 450Hz ODR */
114 { 114 {
115 {256, 256, 193}, 115 {256, 256, 193},
116 8192, 116 8192,
117 }, 117 },
118 /* 12 bits, 666Hz ODR */ 118 /* 12 bits, 800Hz ODR */
119 { 119 {
120 {64, 64, 48}, 120 {64, 64, 48},
121 2048, 121 2048,
@@ -133,9 +133,15 @@ struct mmc35240_data {
133 int axis_scale[3]; 133 int axis_scale[3];
134}; 134};
135 135
136static const int mmc35240_samp_freq[] = {100, 200, 333, 666}; 136static const struct {
137 int val;
138 int val2;
139} mmc35240_samp_freq[] = { {1, 500000},
140 {13, 0},
141 {25, 0},
142 {50, 0} };
137 143
138static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("100 200 333 666"); 144static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("1.5 13 25 50");
139 145
140#define MMC35240_CHANNEL(_axis) { \ 146#define MMC35240_CHANNEL(_axis) { \
141 .type = IIO_MAGN, \ 147 .type = IIO_MAGN, \
@@ -168,7 +174,8 @@ static int mmc35240_get_samp_freq_index(struct mmc35240_data *data,
168 int i; 174 int i;
169 175
170 for (i = 0; i < ARRAY_SIZE(mmc35240_samp_freq); i++) 176 for (i = 0; i < ARRAY_SIZE(mmc35240_samp_freq); i++)
171 if (mmc35240_samp_freq[i] == val) 177 if (mmc35240_samp_freq[i].val == val &&
178 mmc35240_samp_freq[i].val2 == val2)
172 return i; 179 return i;
173 return -EINVAL; 180 return -EINVAL;
174} 181}
@@ -195,8 +202,8 @@ static int mmc35240_hw_set(struct mmc35240_data *data, bool set)
195 coil_bit = MMC35240_CTRL0_RESET_BIT; 202 coil_bit = MMC35240_CTRL0_RESET_BIT;
196 203
197 return regmap_update_bits(data->regmap, MMC35240_REG_CTRL0, 204 return regmap_update_bits(data->regmap, MMC35240_REG_CTRL0,
198 MMC35240_CTRL0_REFILL_BIT, 205 coil_bit, coil_bit);
199 coil_bit); 206
200} 207}
201 208
202static int mmc35240_init(struct mmc35240_data *data) 209static int mmc35240_init(struct mmc35240_data *data)
@@ -215,14 +222,15 @@ static int mmc35240_init(struct mmc35240_data *data)
215 222
216 /* 223 /*
217 * make sure we restore sensor characteristics, by doing 224 * make sure we restore sensor characteristics, by doing
218 * a RESET/SET sequence 225 * a SET/RESET sequence, the axis polarity being naturally
226 * aligned after RESET
219 */ 227 */
220 ret = mmc35240_hw_set(data, false); 228 ret = mmc35240_hw_set(data, true);
221 if (ret < 0) 229 if (ret < 0)
222 return ret; 230 return ret;
223 usleep_range(MMC53240_WAIT_SET_RESET, MMC53240_WAIT_SET_RESET + 1); 231 usleep_range(MMC53240_WAIT_SET_RESET, MMC53240_WAIT_SET_RESET + 1);
224 232
225 ret = mmc35240_hw_set(data, true); 233 ret = mmc35240_hw_set(data, false);
226 if (ret < 0) 234 if (ret < 0)
227 return ret; 235 return ret;
228 236
@@ -378,9 +386,9 @@ static int mmc35240_read_raw(struct iio_dev *indio_dev,
378 if (i < 0 || i >= ARRAY_SIZE(mmc35240_samp_freq)) 386 if (i < 0 || i >= ARRAY_SIZE(mmc35240_samp_freq))
379 return -EINVAL; 387 return -EINVAL;
380 388
381 *val = mmc35240_samp_freq[i]; 389 *val = mmc35240_samp_freq[i].val;
382 *val2 = 0; 390 *val2 = mmc35240_samp_freq[i].val2;
383 return IIO_VAL_INT; 391 return IIO_VAL_INT_PLUS_MICRO;
384 default: 392 default:
385 return -EINVAL; 393 return -EINVAL;
386 } 394 }
@@ -496,6 +504,7 @@ static int mmc35240_probe(struct i2c_client *client,
496 } 504 }
497 505
498 data = iio_priv(indio_dev); 506 data = iio_priv(indio_dev);
507 i2c_set_clientdata(client, indio_dev);
499 data->client = client; 508 data->client = client;
500 data->regmap = regmap; 509 data->regmap = regmap;
501 data->res = MMC35240_16_BITS_SLOW; 510 data->res = MMC35240_16_BITS_SLOW;
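
The mmc35240 changes replace the integer frequency list with (val, val2) pairs reported as IIO_VAL_INT_PLUS_MICRO, so 1.5 Hz becomes val = 1, val2 = 500000 and the lookup has to compare both halves. A sketch mirroring the shape of the new table, for illustration only:

    #include <stdio.h>

    static const struct { int val; int val2; } samp_freq[] = {
        { 1, 500000 }, { 13, 0 }, { 25, 0 }, { 50, 0 },
    };

    static int freq_index(int val, int val2)
    {
        for (unsigned int i = 0; i < sizeof(samp_freq) / sizeof(samp_freq[0]); i++)
            if (samp_freq[i].val == val && samp_freq[i].val2 == val2)
                return (int)i;
        return -1;
    }

    int main(void)
    {
        printf("%d\n", freq_index(1, 500000));  /* 0: 1.5 Hz                   */
        printf("%d\n", freq_index(50, 0));      /* 3: 50 Hz                    */
        printf("%d\n", freq_index(100, 0));     /* -1: no longer a valid rate  */
        return 0;
    }
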
diff --git a/drivers/iio/proximity/sx9500.c b/drivers/iio/proximity/sx9500.c
index 2042e375f835..3d756bd8c703 100644
--- a/drivers/iio/proximity/sx9500.c
+++ b/drivers/iio/proximity/sx9500.c
@@ -80,6 +80,7 @@
80#define SX9500_COMPSTAT_MASK GENMASK(3, 0) 80#define SX9500_COMPSTAT_MASK GENMASK(3, 0)
81 81
82#define SX9500_NUM_CHANNELS 4 82#define SX9500_NUM_CHANNELS 4
83#define SX9500_CHAN_MASK GENMASK(SX9500_NUM_CHANNELS - 1, 0)
83 84
84struct sx9500_data { 85struct sx9500_data {
85 struct mutex mutex; 86 struct mutex mutex;
@@ -281,7 +282,7 @@ static int sx9500_read_prox_data(struct sx9500_data *data,
281 if (ret < 0) 282 if (ret < 0)
282 return ret; 283 return ret;
283 284
284 *val = 32767 - (s16)be16_to_cpu(regval); 285 *val = be16_to_cpu(regval);
285 286
286 return IIO_VAL_INT; 287 return IIO_VAL_INT;
287} 288}
@@ -329,27 +330,29 @@ static int sx9500_read_proximity(struct sx9500_data *data,
329 else 330 else
330 ret = sx9500_wait_for_sample(data); 331 ret = sx9500_wait_for_sample(data);
331 332
332 if (ret < 0)
333 return ret;
334
335 mutex_lock(&data->mutex); 333 mutex_lock(&data->mutex);
336 334
337 ret = sx9500_read_prox_data(data, chan, val);
338 if (ret < 0) 335 if (ret < 0)
339 goto out; 336 goto out_dec_data_rdy;
340 337
341 ret = sx9500_dec_chan_users(data, chan->channel); 338 ret = sx9500_read_prox_data(data, chan, val);
342 if (ret < 0) 339 if (ret < 0)
343 goto out; 340 goto out_dec_data_rdy;
344 341
345 ret = sx9500_dec_data_rdy_users(data); 342 ret = sx9500_dec_data_rdy_users(data);
346 if (ret < 0) 343 if (ret < 0)
344 goto out_dec_chan;
345
346 ret = sx9500_dec_chan_users(data, chan->channel);
347 if (ret < 0)
347 goto out; 348 goto out;
348 349
349 ret = IIO_VAL_INT; 350 ret = IIO_VAL_INT;
350 351
351 goto out; 352 goto out;
352 353
354out_dec_data_rdy:
355 sx9500_dec_data_rdy_users(data);
353out_dec_chan: 356out_dec_chan:
354 sx9500_dec_chan_users(data, chan->channel); 357 sx9500_dec_chan_users(data, chan->channel);
355out: 358out:
@@ -679,7 +682,7 @@ out:
679static int sx9500_buffer_preenable(struct iio_dev *indio_dev) 682static int sx9500_buffer_preenable(struct iio_dev *indio_dev)
680{ 683{
681 struct sx9500_data *data = iio_priv(indio_dev); 684 struct sx9500_data *data = iio_priv(indio_dev);
682 int ret, i; 685 int ret = 0, i;
683 686
684 mutex_lock(&data->mutex); 687 mutex_lock(&data->mutex);
685 688
@@ -703,7 +706,7 @@ static int sx9500_buffer_preenable(struct iio_dev *indio_dev)
703static int sx9500_buffer_predisable(struct iio_dev *indio_dev) 706static int sx9500_buffer_predisable(struct iio_dev *indio_dev)
704{ 707{
705 struct sx9500_data *data = iio_priv(indio_dev); 708 struct sx9500_data *data = iio_priv(indio_dev);
706 int ret, i; 709 int ret = 0, i;
707 710
708 iio_triggered_buffer_predisable(indio_dev); 711 iio_triggered_buffer_predisable(indio_dev);
709 712
@@ -800,8 +803,7 @@ static int sx9500_init_compensation(struct iio_dev *indio_dev)
800 unsigned int val; 803 unsigned int val;
801 804
802 ret = regmap_update_bits(data->regmap, SX9500_REG_PROX_CTRL0, 805 ret = regmap_update_bits(data->regmap, SX9500_REG_PROX_CTRL0,
803 GENMASK(SX9500_NUM_CHANNELS, 0), 806 SX9500_CHAN_MASK, SX9500_CHAN_MASK);
804 GENMASK(SX9500_NUM_CHANNELS, 0));
805 if (ret < 0) 807 if (ret < 0)
806 return ret; 808 return ret;
807 809
@@ -821,7 +823,7 @@ static int sx9500_init_compensation(struct iio_dev *indio_dev)
821 823
822out: 824out:
823 regmap_update_bits(data->regmap, SX9500_REG_PROX_CTRL0, 825 regmap_update_bits(data->regmap, SX9500_REG_PROX_CTRL0,
824 GENMASK(SX9500_NUM_CHANNELS, 0), 0); 826 SX9500_CHAN_MASK, 0);
825 return ret; 827 return ret;
826} 828}
827 829
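
Among other things, the sx9500 hunk replaces GENMASK(SX9500_NUM_CHANNELS, 0) with a named SX9500_CHAN_MASK built as GENMASK(SX9500_NUM_CHANNELS - 1, 0): a mask covering N channels spans bits N-1..0, so the old expression set one bit too many. Quick userspace check, with GENMASK() re-implemented here purely for illustration (assumes 32-bit unsigned int):

    #include <stdio.h>

    #define GENMASK(h, l)   (((~0u) >> (31 - (h))) & ~((1u << (l)) - 1u))
    #define NUM_CHANNELS    4

    int main(void)
    {
        printf("old:   0x%02x\n", GENMASK(NUM_CHANNELS, 0));      /* 0x1f: 5 bits */
        printf("fixed: 0x%02x\n", GENMASK(NUM_CHANNELS - 1, 0));  /* 0x0f: 4 bits */
        return 0;
    }
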
diff --git a/drivers/iio/temperature/mlx90614.c b/drivers/iio/temperature/mlx90614.c
index cb2e8ad8bfdc..7a2b639eaa96 100644
--- a/drivers/iio/temperature/mlx90614.c
+++ b/drivers/iio/temperature/mlx90614.c
@@ -204,7 +204,7 @@ static int mlx90614_read_raw(struct iio_dev *indio_dev,
204 *val = ret; 204 *val = ret;
205 return IIO_VAL_INT; 205 return IIO_VAL_INT;
206 case IIO_CHAN_INFO_OFFSET: 206 case IIO_CHAN_INFO_OFFSET:
207 *val = 13657; 207 *val = -13657;
208 *val2 = 500000; 208 *val2 = 500000;
209 return IIO_VAL_INT_PLUS_MICRO; 209 return IIO_VAL_INT_PLUS_MICRO;
210 case IIO_CHAN_INFO_SCALE: 210 case IIO_CHAN_INFO_SCALE:
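
The mlx90614 fix flips the sign of the reported offset. Assuming the usual IIO convention value = (raw + offset) * scale and the sensor's 0.02 K per LSB resolution, converting to Celsius needs offset = -273.15 / 0.02 = -13657.5, which IIO_VAL_INT_PLUS_MICRO encodes as val = -13657, val2 = 500000. A quick check of that arithmetic:

    #include <stdio.h>

    int main(void)
    {
        double scale_kelvin = 0.02;                     /* K per LSB (assumed)   */
        double offset = -273.15 / scale_kelvin;         /* offset in LSB units   */

        printf("offset     = %.1f LSB\n", offset);      /* -13657.5              */
        printf("25 C raw   = %.1f LSB\n",
               (25.0 + 273.15) / scale_kelvin);         /* 14907.5               */
        return 0;
    }
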
diff --git a/drivers/iio/temperature/tmp006.c b/drivers/iio/temperature/tmp006.c
index fcc49f89b946..8f21f32f9739 100644
--- a/drivers/iio/temperature/tmp006.c
+++ b/drivers/iio/temperature/tmp006.c
@@ -132,6 +132,9 @@ static int tmp006_write_raw(struct iio_dev *indio_dev,
132 struct tmp006_data *data = iio_priv(indio_dev); 132 struct tmp006_data *data = iio_priv(indio_dev);
133 int i; 133 int i;
134 134
135 if (mask != IIO_CHAN_INFO_SAMP_FREQ)
136 return -EINVAL;
137
135 for (i = 0; i < ARRAY_SIZE(tmp006_freqs); i++) 138 for (i = 0; i < ARRAY_SIZE(tmp006_freqs); i++)
136 if ((val == tmp006_freqs[i][0]) && 139 if ((val == tmp006_freqs[i][0]) &&
137 (val2 == tmp006_freqs[i][1])) { 140 (val2 == tmp006_freqs[i][1])) {
diff --git a/drivers/infiniband/core/agent.c b/drivers/infiniband/core/agent.c
index c7dcfe4ca5f1..0429040304fd 100644
--- a/drivers/infiniband/core/agent.c
+++ b/drivers/infiniband/core/agent.c
@@ -88,7 +88,7 @@ void agent_send_response(const struct ib_mad_hdr *mad_hdr, const struct ib_grh *
88 struct ib_ah *ah; 88 struct ib_ah *ah;
89 struct ib_mad_send_wr_private *mad_send_wr; 89 struct ib_mad_send_wr_private *mad_send_wr;
90 90
91 if (device->node_type == RDMA_NODE_IB_SWITCH) 91 if (rdma_cap_ib_switch(device))
92 port_priv = ib_get_agent_port(device, 0); 92 port_priv = ib_get_agent_port(device, 0);
93 else 93 else
94 port_priv = ib_get_agent_port(device, port_num); 94 port_priv = ib_get_agent_port(device, port_num);
@@ -122,7 +122,7 @@ void agent_send_response(const struct ib_mad_hdr *mad_hdr, const struct ib_grh *
122 memcpy(send_buf->mad, mad_hdr, resp_mad_len); 122 memcpy(send_buf->mad, mad_hdr, resp_mad_len);
123 send_buf->ah = ah; 123 send_buf->ah = ah;
124 124
125 if (device->node_type == RDMA_NODE_IB_SWITCH) { 125 if (rdma_cap_ib_switch(device)) {
126 mad_send_wr = container_of(send_buf, 126 mad_send_wr = container_of(send_buf,
127 struct ib_mad_send_wr_private, 127 struct ib_mad_send_wr_private,
128 send_buf); 128 send_buf);
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index dbddddd6fb5d..3a972ebf3c0d 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -169,6 +169,7 @@ struct cm_device {
169 struct ib_device *ib_device; 169 struct ib_device *ib_device;
170 struct device *device; 170 struct device *device;
171 u8 ack_delay; 171 u8 ack_delay;
172 int going_down;
172 struct cm_port *port[0]; 173 struct cm_port *port[0];
173}; 174};
174 175
@@ -805,6 +806,11 @@ static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
805{ 806{
806 int wait_time; 807 int wait_time;
807 unsigned long flags; 808 unsigned long flags;
809 struct cm_device *cm_dev;
810
811 cm_dev = ib_get_client_data(cm_id_priv->id.device, &cm_client);
812 if (!cm_dev)
813 return;
808 814
809 spin_lock_irqsave(&cm.lock, flags); 815 spin_lock_irqsave(&cm.lock, flags);
810 cm_cleanup_timewait(cm_id_priv->timewait_info); 816 cm_cleanup_timewait(cm_id_priv->timewait_info);
@@ -818,8 +824,14 @@ static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
818 */ 824 */
819 cm_id_priv->id.state = IB_CM_TIMEWAIT; 825 cm_id_priv->id.state = IB_CM_TIMEWAIT;
820 wait_time = cm_convert_to_ms(cm_id_priv->av.timeout); 826 wait_time = cm_convert_to_ms(cm_id_priv->av.timeout);
821 queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work, 827
822 msecs_to_jiffies(wait_time)); 828 /* Check if the device started its remove_one */
829 spin_lock_irq(&cm.lock);
830 if (!cm_dev->going_down)
831 queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
832 msecs_to_jiffies(wait_time));
833 spin_unlock_irq(&cm.lock);
834
823 cm_id_priv->timewait_info = NULL; 835 cm_id_priv->timewait_info = NULL;
824} 836}
825 837
@@ -3305,6 +3317,11 @@ static int cm_establish(struct ib_cm_id *cm_id)
3305 struct cm_work *work; 3317 struct cm_work *work;
3306 unsigned long flags; 3318 unsigned long flags;
3307 int ret = 0; 3319 int ret = 0;
3320 struct cm_device *cm_dev;
3321
3322 cm_dev = ib_get_client_data(cm_id->device, &cm_client);
3323 if (!cm_dev)
3324 return -ENODEV;
3308 3325
3309 work = kmalloc(sizeof *work, GFP_ATOMIC); 3326 work = kmalloc(sizeof *work, GFP_ATOMIC);
3310 if (!work) 3327 if (!work)
@@ -3343,7 +3360,17 @@ static int cm_establish(struct ib_cm_id *cm_id)
3343 work->remote_id = cm_id->remote_id; 3360 work->remote_id = cm_id->remote_id;
3344 work->mad_recv_wc = NULL; 3361 work->mad_recv_wc = NULL;
3345 work->cm_event.event = IB_CM_USER_ESTABLISHED; 3362 work->cm_event.event = IB_CM_USER_ESTABLISHED;
3346 queue_delayed_work(cm.wq, &work->work, 0); 3363
3364 /* Check if the device started its remove_one */
3365 spin_lock_irq(&cm.lock);
3366 if (!cm_dev->going_down) {
3367 queue_delayed_work(cm.wq, &work->work, 0);
3368 } else {
3369 kfree(work);
3370 ret = -ENODEV;
3371 }
3372 spin_unlock_irq(&cm.lock);
3373
3347out: 3374out:
3348 return ret; 3375 return ret;
3349} 3376}
@@ -3394,6 +3421,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
3394 enum ib_cm_event_type event; 3421 enum ib_cm_event_type event;
3395 u16 attr_id; 3422 u16 attr_id;
3396 int paths = 0; 3423 int paths = 0;
3424 int going_down = 0;
3397 3425
3398 switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) { 3426 switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
3399 case CM_REQ_ATTR_ID: 3427 case CM_REQ_ATTR_ID:
@@ -3452,7 +3480,19 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
3452 work->cm_event.event = event; 3480 work->cm_event.event = event;
3453 work->mad_recv_wc = mad_recv_wc; 3481 work->mad_recv_wc = mad_recv_wc;
3454 work->port = port; 3482 work->port = port;
3455 queue_delayed_work(cm.wq, &work->work, 0); 3483
3484 /* Check if the device started its remove_one */
3485 spin_lock_irq(&cm.lock);
3486 if (!port->cm_dev->going_down)
3487 queue_delayed_work(cm.wq, &work->work, 0);
3488 else
3489 going_down = 1;
3490 spin_unlock_irq(&cm.lock);
3491
3492 if (going_down) {
3493 kfree(work);
3494 ib_free_recv_mad(mad_recv_wc);
3495 }
3456} 3496}
3457 3497
3458static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv, 3498static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
@@ -3771,7 +3811,7 @@ static void cm_add_one(struct ib_device *ib_device)
3771 3811
3772 cm_dev->ib_device = ib_device; 3812 cm_dev->ib_device = ib_device;
3773 cm_get_ack_delay(cm_dev); 3813 cm_get_ack_delay(cm_dev);
3774 3814 cm_dev->going_down = 0;
3775 cm_dev->device = device_create(&cm_class, &ib_device->dev, 3815 cm_dev->device = device_create(&cm_class, &ib_device->dev,
3776 MKDEV(0, 0), NULL, 3816 MKDEV(0, 0), NULL,
3777 "%s", ib_device->name); 3817 "%s", ib_device->name);
@@ -3864,14 +3904,23 @@ static void cm_remove_one(struct ib_device *ib_device)
3864 list_del(&cm_dev->list); 3904 list_del(&cm_dev->list);
3865 write_unlock_irqrestore(&cm.device_lock, flags); 3905 write_unlock_irqrestore(&cm.device_lock, flags);
3866 3906
3907 spin_lock_irq(&cm.lock);
3908 cm_dev->going_down = 1;
3909 spin_unlock_irq(&cm.lock);
3910
3867 for (i = 1; i <= ib_device->phys_port_cnt; i++) { 3911 for (i = 1; i <= ib_device->phys_port_cnt; i++) {
3868 if (!rdma_cap_ib_cm(ib_device, i)) 3912 if (!rdma_cap_ib_cm(ib_device, i))
3869 continue; 3913 continue;
3870 3914
3871 port = cm_dev->port[i-1]; 3915 port = cm_dev->port[i-1];
3872 ib_modify_port(ib_device, port->port_num, 0, &port_modify); 3916 ib_modify_port(ib_device, port->port_num, 0, &port_modify);
3873 ib_unregister_mad_agent(port->mad_agent); 3917 /*
3918 * We flush the queue here after the going_down set, this
3919 * verify that no new works will be queued in the recv handler,
3920 * after that we can call the unregister_mad_agent
3921 */
3874 flush_workqueue(cm.wq); 3922 flush_workqueue(cm.wq);
3923 ib_unregister_mad_agent(port->mad_agent);
3875 cm_remove_port_fs(port); 3924 cm_remove_port_fs(port);
3876 } 3925 }
3877 device_unregister(cm_dev->device); 3926 device_unregister(cm_dev->device);
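
The ib_cm changes add a going_down flag that cm_remove_one() sets under cm.lock; every path that queues delayed work now re-checks the flag under that same lock, and the workqueue is flushed after the flag is set but before the MAD agent is unregistered, so no late work can reference a dying device. A rough userspace sketch of that gate using a pthread mutex, illustrative of the pattern only:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static bool going_down;

    static int queue_work(const char *what)
    {
        int ret = 0;

        pthread_mutex_lock(&lock);
        if (!going_down)
            printf("queued: %s\n", what);   /* would schedule real work */
        else
            ret = -1;                       /* device is being removed  */
        pthread_mutex_unlock(&lock);
        return ret;
    }

    int main(void)
    {
        queue_work("establish event");      /* accepted                 */

        pthread_mutex_lock(&lock);          /* remove_one() path        */
        going_down = true;
        pthread_mutex_unlock(&lock);
        /* ...flush/drain the work queue here, then tear down... */

        printf("late queue -> %d\n", queue_work("late event"));  /* -1 */
        return 0;
    }
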
diff --git a/drivers/infiniband/core/iwpm_msg.c b/drivers/infiniband/core/iwpm_msg.c
index e6ffa2e66c1a..22a3abee2a54 100644
--- a/drivers/infiniband/core/iwpm_msg.c
+++ b/drivers/infiniband/core/iwpm_msg.c
@@ -67,7 +67,8 @@ int iwpm_register_pid(struct iwpm_dev_data *pm_msg, u8 nl_client)
67 err_str = "Invalid port mapper client"; 67 err_str = "Invalid port mapper client";
68 goto pid_query_error; 68 goto pid_query_error;
69 } 69 }
70 if (iwpm_registered_client(nl_client)) 70 if (iwpm_check_registration(nl_client, IWPM_REG_VALID) ||
71 iwpm_user_pid == IWPM_PID_UNAVAILABLE)
71 return 0; 72 return 0;
72 skb = iwpm_create_nlmsg(RDMA_NL_IWPM_REG_PID, &nlh, nl_client); 73 skb = iwpm_create_nlmsg(RDMA_NL_IWPM_REG_PID, &nlh, nl_client);
73 if (!skb) { 74 if (!skb) {
@@ -106,7 +107,6 @@ int iwpm_register_pid(struct iwpm_dev_data *pm_msg, u8 nl_client)
106 ret = ibnl_multicast(skb, nlh, RDMA_NL_GROUP_IWPM, GFP_KERNEL); 107 ret = ibnl_multicast(skb, nlh, RDMA_NL_GROUP_IWPM, GFP_KERNEL);
107 if (ret) { 108 if (ret) {
108 skb = NULL; /* skb is freed in the netlink send-op handling */ 109 skb = NULL; /* skb is freed in the netlink send-op handling */
109 iwpm_set_registered(nl_client, 1);
110 iwpm_user_pid = IWPM_PID_UNAVAILABLE; 110 iwpm_user_pid = IWPM_PID_UNAVAILABLE;
111 err_str = "Unable to send a nlmsg"; 111 err_str = "Unable to send a nlmsg";
112 goto pid_query_error; 112 goto pid_query_error;
@@ -144,12 +144,12 @@ int iwpm_add_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
144 err_str = "Invalid port mapper client"; 144 err_str = "Invalid port mapper client";
145 goto add_mapping_error; 145 goto add_mapping_error;
146 } 146 }
147 if (!iwpm_registered_client(nl_client)) { 147 if (!iwpm_valid_pid())
148 return 0;
149 if (!iwpm_check_registration(nl_client, IWPM_REG_VALID)) {
148 err_str = "Unregistered port mapper client"; 150 err_str = "Unregistered port mapper client";
149 goto add_mapping_error; 151 goto add_mapping_error;
150 } 152 }
151 if (!iwpm_valid_pid())
152 return 0;
153 skb = iwpm_create_nlmsg(RDMA_NL_IWPM_ADD_MAPPING, &nlh, nl_client); 153 skb = iwpm_create_nlmsg(RDMA_NL_IWPM_ADD_MAPPING, &nlh, nl_client);
154 if (!skb) { 154 if (!skb) {
155 err_str = "Unable to create a nlmsg"; 155 err_str = "Unable to create a nlmsg";
@@ -214,12 +214,12 @@ int iwpm_add_and_query_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
214 err_str = "Invalid port mapper client"; 214 err_str = "Invalid port mapper client";
215 goto query_mapping_error; 215 goto query_mapping_error;
216 } 216 }
217 if (!iwpm_registered_client(nl_client)) { 217 if (!iwpm_valid_pid())
218 return 0;
219 if (!iwpm_check_registration(nl_client, IWPM_REG_VALID)) {
218 err_str = "Unregistered port mapper client"; 220 err_str = "Unregistered port mapper client";
219 goto query_mapping_error; 221 goto query_mapping_error;
220 } 222 }
221 if (!iwpm_valid_pid())
222 return 0;
223 ret = -ENOMEM; 223 ret = -ENOMEM;
224 skb = iwpm_create_nlmsg(RDMA_NL_IWPM_QUERY_MAPPING, &nlh, nl_client); 224 skb = iwpm_create_nlmsg(RDMA_NL_IWPM_QUERY_MAPPING, &nlh, nl_client);
225 if (!skb) { 225 if (!skb) {
@@ -288,12 +288,12 @@ int iwpm_remove_mapping(struct sockaddr_storage *local_addr, u8 nl_client)
288 err_str = "Invalid port mapper client"; 288 err_str = "Invalid port mapper client";
289 goto remove_mapping_error; 289 goto remove_mapping_error;
290 } 290 }
291 if (!iwpm_registered_client(nl_client)) { 291 if (!iwpm_valid_pid())
292 return 0;
293 if (iwpm_check_registration(nl_client, IWPM_REG_UNDEF)) {
292 err_str = "Unregistered port mapper client"; 294 err_str = "Unregistered port mapper client";
293 goto remove_mapping_error; 295 goto remove_mapping_error;
294 } 296 }
295 if (!iwpm_valid_pid())
296 return 0;
297 skb = iwpm_create_nlmsg(RDMA_NL_IWPM_REMOVE_MAPPING, &nlh, nl_client); 297 skb = iwpm_create_nlmsg(RDMA_NL_IWPM_REMOVE_MAPPING, &nlh, nl_client);
298 if (!skb) { 298 if (!skb) {
299 ret = -ENOMEM; 299 ret = -ENOMEM;
@@ -388,7 +388,7 @@ int iwpm_register_pid_cb(struct sk_buff *skb, struct netlink_callback *cb)
388 pr_debug("%s: iWarp Port Mapper (pid = %d) is available!\n", 388 pr_debug("%s: iWarp Port Mapper (pid = %d) is available!\n",
389 __func__, iwpm_user_pid); 389 __func__, iwpm_user_pid);
390 if (iwpm_valid_client(nl_client)) 390 if (iwpm_valid_client(nl_client))
391 iwpm_set_registered(nl_client, 1); 391 iwpm_set_registration(nl_client, IWPM_REG_VALID);
392register_pid_response_exit: 392register_pid_response_exit:
393 nlmsg_request->request_done = 1; 393 nlmsg_request->request_done = 1;
394 /* always for found nlmsg_request */ 394 /* always for found nlmsg_request */
@@ -644,7 +644,6 @@ int iwpm_mapping_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
644{ 644{
645 struct nlattr *nltb[IWPM_NLA_MAPINFO_REQ_MAX]; 645 struct nlattr *nltb[IWPM_NLA_MAPINFO_REQ_MAX];
646 const char *msg_type = "Mapping Info response"; 646 const char *msg_type = "Mapping Info response";
647 int iwpm_pid;
648 u8 nl_client; 647 u8 nl_client;
649 char *iwpm_name; 648 char *iwpm_name;
650 u16 iwpm_version; 649 u16 iwpm_version;
@@ -669,14 +668,14 @@ int iwpm_mapping_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
669 __func__, nl_client); 668 __func__, nl_client);
670 return ret; 669 return ret;
671 } 670 }
672 iwpm_set_registered(nl_client, 0); 671 iwpm_set_registration(nl_client, IWPM_REG_INCOMPL);
673 atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq); 672 atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq);
673 iwpm_user_pid = cb->nlh->nlmsg_pid;
674 if (!iwpm_mapinfo_available()) 674 if (!iwpm_mapinfo_available())
675 return 0; 675 return 0;
676 iwpm_pid = cb->nlh->nlmsg_pid;
677 pr_debug("%s: iWarp Port Mapper (pid = %d) is available!\n", 676 pr_debug("%s: iWarp Port Mapper (pid = %d) is available!\n",
678 __func__, iwpm_pid); 677 __func__, iwpm_user_pid);
679 ret = iwpm_send_mapinfo(nl_client, iwpm_pid); 678 ret = iwpm_send_mapinfo(nl_client, iwpm_user_pid);
680 return ret; 679 return ret;
681} 680}
682EXPORT_SYMBOL(iwpm_mapping_info_cb); 681EXPORT_SYMBOL(iwpm_mapping_info_cb);
diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c
index a626795bf9c7..5fb089e91353 100644
--- a/drivers/infiniband/core/iwpm_util.c
+++ b/drivers/infiniband/core/iwpm_util.c
@@ -78,6 +78,7 @@ init_exit:
78 mutex_unlock(&iwpm_admin_lock); 78 mutex_unlock(&iwpm_admin_lock);
79 if (!ret) { 79 if (!ret) {
80 iwpm_set_valid(nl_client, 1); 80 iwpm_set_valid(nl_client, 1);
81 iwpm_set_registration(nl_client, IWPM_REG_UNDEF);
81 pr_debug("%s: Mapinfo and reminfo tables are created\n", 82 pr_debug("%s: Mapinfo and reminfo tables are created\n",
82 __func__); 83 __func__);
83 } 84 }
@@ -106,6 +107,7 @@ int iwpm_exit(u8 nl_client)
106 } 107 }
107 mutex_unlock(&iwpm_admin_lock); 108 mutex_unlock(&iwpm_admin_lock);
108 iwpm_set_valid(nl_client, 0); 109 iwpm_set_valid(nl_client, 0);
110 iwpm_set_registration(nl_client, IWPM_REG_UNDEF);
109 return 0; 111 return 0;
110} 112}
111EXPORT_SYMBOL(iwpm_exit); 113EXPORT_SYMBOL(iwpm_exit);
@@ -397,17 +399,23 @@ void iwpm_set_valid(u8 nl_client, int valid)
397} 399}
398 400
399/* valid client */ 401/* valid client */
400int iwpm_registered_client(u8 nl_client) 402u32 iwpm_get_registration(u8 nl_client)
401{ 403{
402 return iwpm_admin.reg_list[nl_client]; 404 return iwpm_admin.reg_list[nl_client];
403} 405}
404 406
405/* valid client */ 407/* valid client */
406void iwpm_set_registered(u8 nl_client, int reg) 408void iwpm_set_registration(u8 nl_client, u32 reg)
407{ 409{
408 iwpm_admin.reg_list[nl_client] = reg; 410 iwpm_admin.reg_list[nl_client] = reg;
409} 411}
410 412
413/* valid client */
414u32 iwpm_check_registration(u8 nl_client, u32 reg)
415{
416 return (iwpm_get_registration(nl_client) & reg);
417}
418
411int iwpm_compare_sockaddr(struct sockaddr_storage *a_sockaddr, 419int iwpm_compare_sockaddr(struct sockaddr_storage *a_sockaddr,
412 struct sockaddr_storage *b_sockaddr) 420 struct sockaddr_storage *b_sockaddr)
413{ 421{
diff --git a/drivers/infiniband/core/iwpm_util.h b/drivers/infiniband/core/iwpm_util.h
index ee2d9ff095be..b7b9e194ce81 100644
--- a/drivers/infiniband/core/iwpm_util.h
+++ b/drivers/infiniband/core/iwpm_util.h
@@ -58,6 +58,10 @@
58#define IWPM_PID_UNDEFINED -1 58#define IWPM_PID_UNDEFINED -1
59#define IWPM_PID_UNAVAILABLE -2 59#define IWPM_PID_UNAVAILABLE -2
60 60
61#define IWPM_REG_UNDEF 0x01
62#define IWPM_REG_VALID 0x02
63#define IWPM_REG_INCOMPL 0x04
64
61struct iwpm_nlmsg_request { 65struct iwpm_nlmsg_request {
62 struct list_head inprocess_list; 66 struct list_head inprocess_list;
63 __u32 nlmsg_seq; 67 __u32 nlmsg_seq;
@@ -88,7 +92,7 @@ struct iwpm_admin_data {
88 atomic_t refcount; 92 atomic_t refcount;
89 atomic_t nlmsg_seq; 93 atomic_t nlmsg_seq;
90 int client_list[RDMA_NL_NUM_CLIENTS]; 94 int client_list[RDMA_NL_NUM_CLIENTS];
91 int reg_list[RDMA_NL_NUM_CLIENTS]; 95 u32 reg_list[RDMA_NL_NUM_CLIENTS];
92}; 96};
93 97
94/** 98/**
@@ -159,19 +163,31 @@ int iwpm_valid_client(u8 nl_client);
159void iwpm_set_valid(u8 nl_client, int valid); 163void iwpm_set_valid(u8 nl_client, int valid);
160 164
161/** 165/**
162 * iwpm_registered_client - Check if the port mapper client is registered 166 * iwpm_check_registration - Check if the client registration
167 * matches the given one
163 * @nl_client: The index of the netlink client 168 * @nl_client: The index of the netlink client
169 * @reg: The given registration type to compare with
164 * 170 *
165 * Call iwpm_register_pid() to register a client 171 * Call iwpm_register_pid() to register a client
172 * Returns true if the client registration matches reg,
173 * otherwise returns false
174 */
175u32 iwpm_check_registration(u8 nl_client, u32 reg);
176
177/**
178 * iwpm_set_registration - Set the client registration
179 * @nl_client: The index of the netlink client
180 * @reg: Registration type to set
166 */ 181 */
167int iwpm_registered_client(u8 nl_client); 182void iwpm_set_registration(u8 nl_client, u32 reg);
168 183
169/** 184/**
170 * iwpm_set_registered - Set the port mapper client to registered or not 185 * iwpm_get_registration
171 * @nl_client: The index of the netlink client 186 * @nl_client: The index of the netlink client
172 * @reg: 1 if registered or 0 if not 187 *
188 * Returns the client registration type
173 */ 189 */
174void iwpm_set_registered(u8 nl_client, int reg); 190u32 iwpm_get_registration(u8 nl_client);
175 191
176/** 192/**
177 * iwpm_send_mapinfo - Send local and mapped IPv4/IPv6 address info of 193 * iwpm_send_mapinfo - Send local and mapped IPv4/IPv6 address info of
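
The iwpm rework turns the boolean registered flag into a small set of state bits (IWPM_REG_UNDEF, IWPM_REG_VALID, IWPM_REG_INCOMPL) so callers can ask whether a client is in any of a set of states with one AND. A minimal sketch of that encoding, with illustrative helper names:

    #include <stdint.h>
    #include <stdio.h>

    #define REG_UNDEF   0x01
    #define REG_VALID   0x02
    #define REG_INCOMPL 0x04

    static uint32_t reg_state = REG_UNDEF;

    static int check_registration(uint32_t wanted)
    {
        return (reg_state & wanted) != 0;   /* true if current state matches */
    }

    int main(void)
    {
        printf("valid? %d\n", check_registration(REG_VALID));          /* 0 */
        reg_state = REG_VALID;
        printf("valid or incompl? %d\n",
               check_registration(REG_VALID | REG_INCOMPL));           /* 1 */
        return 0;
    }
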
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index a4b1466c1bf6..786fc51bf04b 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -769,7 +769,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
769 bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device, 769 bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
770 mad_agent_priv->qp_info->port_priv->port_num); 770 mad_agent_priv->qp_info->port_priv->port_num);
771 771
772 if (device->node_type == RDMA_NODE_IB_SWITCH && 772 if (rdma_cap_ib_switch(device) &&
773 smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) 773 smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
774 port_num = send_wr->wr.ud.port_num; 774 port_num = send_wr->wr.ud.port_num;
775 else 775 else
@@ -787,14 +787,15 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
787 if ((opa_get_smp_direction(opa_smp) 787 if ((opa_get_smp_direction(opa_smp)
788 ? opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) == 788 ? opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) ==
789 OPA_LID_PERMISSIVE && 789 OPA_LID_PERMISSIVE &&
790 opa_smi_handle_dr_smp_send(opa_smp, device->node_type, 790 opa_smi_handle_dr_smp_send(opa_smp,
791 rdma_cap_ib_switch(device),
791 port_num) == IB_SMI_DISCARD) { 792 port_num) == IB_SMI_DISCARD) {
792 ret = -EINVAL; 793 ret = -EINVAL;
793 dev_err(&device->dev, "OPA Invalid directed route\n"); 794 dev_err(&device->dev, "OPA Invalid directed route\n");
794 goto out; 795 goto out;
795 } 796 }
796 opa_drslid = be32_to_cpu(opa_smp->route.dr.dr_slid); 797 opa_drslid = be32_to_cpu(opa_smp->route.dr.dr_slid);
797 if (opa_drslid != OPA_LID_PERMISSIVE && 798 if (opa_drslid != be32_to_cpu(OPA_LID_PERMISSIVE) &&
798 opa_drslid & 0xffff0000) { 799 opa_drslid & 0xffff0000) {
799 ret = -EINVAL; 800 ret = -EINVAL;
800 dev_err(&device->dev, "OPA Invalid dr_slid 0x%x\n", 801 dev_err(&device->dev, "OPA Invalid dr_slid 0x%x\n",
@@ -810,7 +811,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
810 } else { 811 } else {
811 if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) == 812 if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
812 IB_LID_PERMISSIVE && 813 IB_LID_PERMISSIVE &&
813 smi_handle_dr_smp_send(smp, device->node_type, port_num) == 814 smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(device), port_num) ==
814 IB_SMI_DISCARD) { 815 IB_SMI_DISCARD) {
815 ret = -EINVAL; 816 ret = -EINVAL;
816 dev_err(&device->dev, "Invalid directed route\n"); 817 dev_err(&device->dev, "Invalid directed route\n");
@@ -2030,7 +2031,7 @@ static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv
2030 struct ib_smp *smp = (struct ib_smp *)recv->mad; 2031 struct ib_smp *smp = (struct ib_smp *)recv->mad;
2031 2032
2032 if (smi_handle_dr_smp_recv(smp, 2033 if (smi_handle_dr_smp_recv(smp,
2033 port_priv->device->node_type, 2034 rdma_cap_ib_switch(port_priv->device),
2034 port_num, 2035 port_num,
2035 port_priv->device->phys_port_cnt) == 2036 port_priv->device->phys_port_cnt) ==
2036 IB_SMI_DISCARD) 2037 IB_SMI_DISCARD)
@@ -2042,13 +2043,13 @@ static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv
2042 2043
2043 if (retsmi == IB_SMI_SEND) { /* don't forward */ 2044 if (retsmi == IB_SMI_SEND) { /* don't forward */
2044 if (smi_handle_dr_smp_send(smp, 2045 if (smi_handle_dr_smp_send(smp,
2045 port_priv->device->node_type, 2046 rdma_cap_ib_switch(port_priv->device),
2046 port_num) == IB_SMI_DISCARD) 2047 port_num) == IB_SMI_DISCARD)
2047 return IB_SMI_DISCARD; 2048 return IB_SMI_DISCARD;
2048 2049
2049 if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD) 2050 if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD)
2050 return IB_SMI_DISCARD; 2051 return IB_SMI_DISCARD;
2051 } else if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) { 2052 } else if (rdma_cap_ib_switch(port_priv->device)) {
2052 /* forward case for switches */ 2053 /* forward case for switches */
2053 memcpy(response, recv, mad_priv_size(response)); 2054 memcpy(response, recv, mad_priv_size(response));
2054 response->header.recv_wc.wc = &response->header.wc; 2055 response->header.recv_wc.wc = &response->header.wc;
@@ -2115,7 +2116,7 @@ handle_opa_smi(struct ib_mad_port_private *port_priv,
2115 struct opa_smp *smp = (struct opa_smp *)recv->mad; 2116 struct opa_smp *smp = (struct opa_smp *)recv->mad;
2116 2117
2117 if (opa_smi_handle_dr_smp_recv(smp, 2118 if (opa_smi_handle_dr_smp_recv(smp,
2118 port_priv->device->node_type, 2119 rdma_cap_ib_switch(port_priv->device),
2119 port_num, 2120 port_num,
2120 port_priv->device->phys_port_cnt) == 2121 port_priv->device->phys_port_cnt) ==
2121 IB_SMI_DISCARD) 2122 IB_SMI_DISCARD)
@@ -2127,7 +2128,7 @@ handle_opa_smi(struct ib_mad_port_private *port_priv,
2127 2128
2128 if (retsmi == IB_SMI_SEND) { /* don't forward */ 2129 if (retsmi == IB_SMI_SEND) { /* don't forward */
2129 if (opa_smi_handle_dr_smp_send(smp, 2130 if (opa_smi_handle_dr_smp_send(smp,
2130 port_priv->device->node_type, 2131 rdma_cap_ib_switch(port_priv->device),
2131 port_num) == IB_SMI_DISCARD) 2132 port_num) == IB_SMI_DISCARD)
2132 return IB_SMI_DISCARD; 2133 return IB_SMI_DISCARD;
2133 2134
@@ -2135,7 +2136,7 @@ handle_opa_smi(struct ib_mad_port_private *port_priv,
2135 IB_SMI_DISCARD) 2136 IB_SMI_DISCARD)
2136 return IB_SMI_DISCARD; 2137 return IB_SMI_DISCARD;
2137 2138
2138 } else if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) { 2139 } else if (rdma_cap_ib_switch(port_priv->device)) {
2139 /* forward case for switches */ 2140 /* forward case for switches */
2140 memcpy(response, recv, mad_priv_size(response)); 2141 memcpy(response, recv, mad_priv_size(response));
2141 response->header.recv_wc.wc = &response->header.wc; 2142 response->header.recv_wc.wc = &response->header.wc;
@@ -2235,7 +2236,7 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
2235 goto out; 2236 goto out;
2236 } 2237 }
2237 2238
2238 if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) 2239 if (rdma_cap_ib_switch(port_priv->device))
2239 port_num = wc->port_num; 2240 port_num = wc->port_num;
2240 else 2241 else
2241 port_num = port_priv->port_num; 2242 port_num = port_priv->port_num;
@@ -3297,17 +3298,11 @@ static int ib_mad_port_close(struct ib_device *device, int port_num)
3297 3298
3298static void ib_mad_init_device(struct ib_device *device) 3299static void ib_mad_init_device(struct ib_device *device)
3299{ 3300{
3300 int start, end, i; 3301 int start, i;
3301 3302
3302 if (device->node_type == RDMA_NODE_IB_SWITCH) { 3303 start = rdma_start_port(device);
3303 start = 0;
3304 end = 0;
3305 } else {
3306 start = 1;
3307 end = device->phys_port_cnt;
3308 }
3309 3304
3310 for (i = start; i <= end; i++) { 3305 for (i = start; i <= rdma_end_port(device); i++) {
3311 if (!rdma_cap_ib_mad(device, i)) 3306 if (!rdma_cap_ib_mad(device, i))
3312 continue; 3307 continue;
3313 3308
@@ -3342,17 +3337,9 @@ error:
3342 3337
3343static void ib_mad_remove_device(struct ib_device *device) 3338static void ib_mad_remove_device(struct ib_device *device)
3344{ 3339{
3345 int start, end, i; 3340 int i;
3346
3347 if (device->node_type == RDMA_NODE_IB_SWITCH) {
3348 start = 0;
3349 end = 0;
3350 } else {
3351 start = 1;
3352 end = device->phys_port_cnt;
3353 }
3354 3341
3355 for (i = start; i <= end; i++) { 3342 for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
3356 if (!rdma_cap_ib_mad(device, i)) 3343 if (!rdma_cap_ib_mad(device, i))
3357 continue; 3344 continue;
3358 3345
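
The MAD, multicast and SA hunks drop the open-coded node_type == RDMA_NODE_IB_SWITCH special case and iterate from rdma_start_port() to rdma_end_port() instead, which collapses to the single port 0 for switches and 1..phys_port_cnt otherwise. A simplified stand-in for that pattern:

    #include <stdbool.h>
    #include <stdio.h>

    struct dev {
        bool is_switch;
        int  phys_port_cnt;
    };

    static int start_port(const struct dev *d) { return d->is_switch ? 0 : 1; }
    static int end_port(const struct dev *d)
    {
        return d->is_switch ? 0 : d->phys_port_cnt;
    }

    int main(void)
    {
        struct dev hca = { .is_switch = false, .phys_port_cnt = 2 };
        struct dev sw  = { .is_switch = true,  .phys_port_cnt = 36 };

        for (int i = start_port(&hca); i <= end_port(&hca); i++)
            printf("hca port %d\n", i);        /* ports 1 and 2 */
        for (int i = start_port(&sw); i <= end_port(&sw); i++)
            printf("switch port %d\n", i);     /* single port 0 */
        return 0;
    }
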
diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
index 1244f02a5c6d..2cb865c7ce7a 100644
--- a/drivers/infiniband/core/multicast.c
+++ b/drivers/infiniband/core/multicast.c
@@ -812,12 +812,8 @@ static void mcast_add_one(struct ib_device *device)
812 if (!dev) 812 if (!dev)
813 return; 813 return;
814 814
815 if (device->node_type == RDMA_NODE_IB_SWITCH) 815 dev->start_port = rdma_start_port(device);
816 dev->start_port = dev->end_port = 0; 816 dev->end_port = rdma_end_port(device);
817 else {
818 dev->start_port = 1;
819 dev->end_port = device->phys_port_cnt;
820 }
821 817
822 for (i = 0; i <= dev->end_port - dev->start_port; i++) { 818 for (i = 0; i <= dev->end_port - dev->start_port; i++) {
823 if (!rdma_cap_ib_mcast(device, dev->start_port + i)) 819 if (!rdma_cap_ib_mcast(device, dev->start_port + i))
diff --git a/drivers/infiniband/core/opa_smi.h b/drivers/infiniband/core/opa_smi.h
index 62d91bfa4cb7..3bfab3505a29 100644
--- a/drivers/infiniband/core/opa_smi.h
+++ b/drivers/infiniband/core/opa_smi.h
@@ -39,12 +39,12 @@
39 39
40#include "smi.h" 40#include "smi.h"
41 41
42enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, u8 node_type, 42enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, bool is_switch,
43 int port_num, int phys_port_cnt); 43 int port_num, int phys_port_cnt);
44int opa_smi_get_fwd_port(struct opa_smp *smp); 44int opa_smi_get_fwd_port(struct opa_smp *smp);
45extern enum smi_forward_action opa_smi_check_forward_dr_smp(struct opa_smp *smp); 45extern enum smi_forward_action opa_smi_check_forward_dr_smp(struct opa_smp *smp);
46extern enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp, 46extern enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp,
47 u8 node_type, int port_num); 47 bool is_switch, int port_num);
48 48
49/* 49/*
50 * Return IB_SMI_HANDLE if the SMP should be handled by the local SMA/SM 50 * Return IB_SMI_HANDLE if the SMP should be handled by the local SMA/SM
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 0fae85062a65..ca919f429666 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -1156,12 +1156,8 @@ static void ib_sa_add_one(struct ib_device *device)
1156 int s, e, i; 1156 int s, e, i;
1157 int count = 0; 1157 int count = 0;
1158 1158
1159 if (device->node_type == RDMA_NODE_IB_SWITCH) 1159 s = rdma_start_port(device);
1160 s = e = 0; 1160 e = rdma_end_port(device);
1161 else {
1162 s = 1;
1163 e = device->phys_port_cnt;
1164 }
1165 1161
1166 sa_dev = kzalloc(sizeof *sa_dev + 1162 sa_dev = kzalloc(sizeof *sa_dev +
1167 (e - s + 1) * sizeof (struct ib_sa_port), 1163 (e - s + 1) * sizeof (struct ib_sa_port),
diff --git a/drivers/infiniband/core/smi.c b/drivers/infiniband/core/smi.c
index 368a561d1a5d..f19b23817c2b 100644
--- a/drivers/infiniband/core/smi.c
+++ b/drivers/infiniband/core/smi.c
@@ -41,7 +41,7 @@
41#include "smi.h" 41#include "smi.h"
42#include "opa_smi.h" 42#include "opa_smi.h"
43 43
44static enum smi_action __smi_handle_dr_smp_send(u8 node_type, int port_num, 44static enum smi_action __smi_handle_dr_smp_send(bool is_switch, int port_num,
45 u8 *hop_ptr, u8 hop_cnt, 45 u8 *hop_ptr, u8 hop_cnt,
46 const u8 *initial_path, 46 const u8 *initial_path,
47 const u8 *return_path, 47 const u8 *return_path,
@@ -64,7 +64,7 @@ static enum smi_action __smi_handle_dr_smp_send(u8 node_type, int port_num,
64 64
65 /* C14-9:2 */ 65 /* C14-9:2 */
66 if (*hop_ptr && *hop_ptr < hop_cnt) { 66 if (*hop_ptr && *hop_ptr < hop_cnt) {
67 if (node_type != RDMA_NODE_IB_SWITCH) 67 if (!is_switch)
68 return IB_SMI_DISCARD; 68 return IB_SMI_DISCARD;
69 69
70 /* return_path set when received */ 70 /* return_path set when received */
@@ -77,7 +77,7 @@ static enum smi_action __smi_handle_dr_smp_send(u8 node_type, int port_num,
77 if (*hop_ptr == hop_cnt) { 77 if (*hop_ptr == hop_cnt) {
78 /* return_path set when received */ 78 /* return_path set when received */
79 (*hop_ptr)++; 79 (*hop_ptr)++;
80 return (node_type == RDMA_NODE_IB_SWITCH || 80 return (is_switch ||
81 dr_dlid_is_permissive ? 81 dr_dlid_is_permissive ?
82 IB_SMI_HANDLE : IB_SMI_DISCARD); 82 IB_SMI_HANDLE : IB_SMI_DISCARD);
83 } 83 }
@@ -96,7 +96,7 @@ static enum smi_action __smi_handle_dr_smp_send(u8 node_type, int port_num,
96 96
97 /* C14-13:2 */ 97 /* C14-13:2 */
98 if (2 <= *hop_ptr && *hop_ptr <= hop_cnt) { 98 if (2 <= *hop_ptr && *hop_ptr <= hop_cnt) {
99 if (node_type != RDMA_NODE_IB_SWITCH) 99 if (!is_switch)
100 return IB_SMI_DISCARD; 100 return IB_SMI_DISCARD;
101 101
102 (*hop_ptr)--; 102 (*hop_ptr)--;
@@ -108,7 +108,7 @@ static enum smi_action __smi_handle_dr_smp_send(u8 node_type, int port_num,
108 if (*hop_ptr == 1) { 108 if (*hop_ptr == 1) {
109 (*hop_ptr)--; 109 (*hop_ptr)--;
110 /* C14-13:3 -- SMPs destined for SM shouldn't be here */ 110 /* C14-13:3 -- SMPs destined for SM shouldn't be here */
111 return (node_type == RDMA_NODE_IB_SWITCH || 111 return (is_switch ||
112 dr_slid_is_permissive ? 112 dr_slid_is_permissive ?
113 IB_SMI_HANDLE : IB_SMI_DISCARD); 113 IB_SMI_HANDLE : IB_SMI_DISCARD);
114 } 114 }
@@ -127,9 +127,9 @@ static enum smi_action __smi_handle_dr_smp_send(u8 node_type, int port_num,
127 * Return IB_SMI_DISCARD if the SMP should be discarded 127 * Return IB_SMI_DISCARD if the SMP should be discarded
128 */ 128 */
129enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp, 129enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
130 u8 node_type, int port_num) 130 bool is_switch, int port_num)
131{ 131{
132 return __smi_handle_dr_smp_send(node_type, port_num, 132 return __smi_handle_dr_smp_send(is_switch, port_num,
133 &smp->hop_ptr, smp->hop_cnt, 133 &smp->hop_ptr, smp->hop_cnt,
134 smp->initial_path, 134 smp->initial_path,
135 smp->return_path, 135 smp->return_path,
@@ -139,9 +139,9 @@ enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
139} 139}
140 140
141enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp, 141enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp,
142 u8 node_type, int port_num) 142 bool is_switch, int port_num)
143{ 143{
144 return __smi_handle_dr_smp_send(node_type, port_num, 144 return __smi_handle_dr_smp_send(is_switch, port_num,
145 &smp->hop_ptr, smp->hop_cnt, 145 &smp->hop_ptr, smp->hop_cnt,
146 smp->route.dr.initial_path, 146 smp->route.dr.initial_path,
147 smp->route.dr.return_path, 147 smp->route.dr.return_path,
@@ -152,7 +152,7 @@ enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp,
152 OPA_LID_PERMISSIVE); 152 OPA_LID_PERMISSIVE);
153} 153}
154 154
155static enum smi_action __smi_handle_dr_smp_recv(u8 node_type, int port_num, 155static enum smi_action __smi_handle_dr_smp_recv(bool is_switch, int port_num,
156 int phys_port_cnt, 156 int phys_port_cnt,
157 u8 *hop_ptr, u8 hop_cnt, 157 u8 *hop_ptr, u8 hop_cnt,
158 const u8 *initial_path, 158 const u8 *initial_path,
@@ -173,7 +173,7 @@ static enum smi_action __smi_handle_dr_smp_recv(u8 node_type, int port_num,
173 173
174 /* C14-9:2 -- intermediate hop */ 174 /* C14-9:2 -- intermediate hop */
175 if (*hop_ptr && *hop_ptr < hop_cnt) { 175 if (*hop_ptr && *hop_ptr < hop_cnt) {
176 if (node_type != RDMA_NODE_IB_SWITCH) 176 if (!is_switch)
177 return IB_SMI_DISCARD; 177 return IB_SMI_DISCARD;
178 178
179 return_path[*hop_ptr] = port_num; 179 return_path[*hop_ptr] = port_num;
@@ -188,7 +188,7 @@ static enum smi_action __smi_handle_dr_smp_recv(u8 node_type, int port_num,
188 return_path[*hop_ptr] = port_num; 188 return_path[*hop_ptr] = port_num;
189 /* hop_ptr updated when sending */ 189 /* hop_ptr updated when sending */
190 190
191 return (node_type == RDMA_NODE_IB_SWITCH || 191 return (is_switch ||
192 dr_dlid_is_permissive ? 192 dr_dlid_is_permissive ?
193 IB_SMI_HANDLE : IB_SMI_DISCARD); 193 IB_SMI_HANDLE : IB_SMI_DISCARD);
194 } 194 }
@@ -208,7 +208,7 @@ static enum smi_action __smi_handle_dr_smp_recv(u8 node_type, int port_num,
208 208
209 /* C14-13:2 */ 209 /* C14-13:2 */
210 if (2 <= *hop_ptr && *hop_ptr <= hop_cnt) { 210 if (2 <= *hop_ptr && *hop_ptr <= hop_cnt) {
211 if (node_type != RDMA_NODE_IB_SWITCH) 211 if (!is_switch)
212 return IB_SMI_DISCARD; 212 return IB_SMI_DISCARD;
213 213
214 /* hop_ptr updated when sending */ 214 /* hop_ptr updated when sending */
@@ -224,8 +224,7 @@ static enum smi_action __smi_handle_dr_smp_recv(u8 node_type, int port_num,
224 return IB_SMI_HANDLE; 224 return IB_SMI_HANDLE;
225 } 225 }
226 /* hop_ptr updated when sending */ 226 /* hop_ptr updated when sending */
227 return (node_type == RDMA_NODE_IB_SWITCH ? 227 return (is_switch ? IB_SMI_HANDLE : IB_SMI_DISCARD);
228 IB_SMI_HANDLE : IB_SMI_DISCARD);
229 } 228 }
230 229
231 /* C14-13:4 -- hop_ptr = 0 -> give to SM */ 230 /* C14-13:4 -- hop_ptr = 0 -> give to SM */
@@ -238,10 +237,10 @@ static enum smi_action __smi_handle_dr_smp_recv(u8 node_type, int port_num,
238 * Adjust information for a received SMP 237 * Adjust information for a received SMP
239 * Return IB_SMI_DISCARD if the SMP should be dropped 238 * Return IB_SMI_DISCARD if the SMP should be dropped
240 */ 239 */
241enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, u8 node_type, 240enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, bool is_switch,
242 int port_num, int phys_port_cnt) 241 int port_num, int phys_port_cnt)
243{ 242{
244 return __smi_handle_dr_smp_recv(node_type, port_num, phys_port_cnt, 243 return __smi_handle_dr_smp_recv(is_switch, port_num, phys_port_cnt,
245 &smp->hop_ptr, smp->hop_cnt, 244 &smp->hop_ptr, smp->hop_cnt,
246 smp->initial_path, 245 smp->initial_path,
247 smp->return_path, 246 smp->return_path,
@@ -254,10 +253,10 @@ enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, u8 node_type,
254 * Adjust information for a received SMP 253 * Adjust information for a received SMP
255 * Return IB_SMI_DISCARD if the SMP should be dropped 254 * Return IB_SMI_DISCARD if the SMP should be dropped
256 */ 255 */
257enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, u8 node_type, 256enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, bool is_switch,
258 int port_num, int phys_port_cnt) 257 int port_num, int phys_port_cnt)
259{ 258{
260 return __smi_handle_dr_smp_recv(node_type, port_num, phys_port_cnt, 259 return __smi_handle_dr_smp_recv(is_switch, port_num, phys_port_cnt,
261 &smp->hop_ptr, smp->hop_cnt, 260 &smp->hop_ptr, smp->hop_cnt,
262 smp->route.dr.initial_path, 261 smp->route.dr.initial_path,
263 smp->route.dr.return_path, 262 smp->route.dr.return_path,
diff --git a/drivers/infiniband/core/smi.h b/drivers/infiniband/core/smi.h
index aff96bac49b4..33c91c8a16e9 100644
--- a/drivers/infiniband/core/smi.h
+++ b/drivers/infiniband/core/smi.h
@@ -51,12 +51,12 @@ enum smi_forward_action {
51 IB_SMI_FORWARD /* SMP should be forwarded (for switches only) */ 51 IB_SMI_FORWARD /* SMP should be forwarded (for switches only) */
52}; 52};
53 53
54enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, u8 node_type, 54enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, bool is_switch,
55 int port_num, int phys_port_cnt); 55 int port_num, int phys_port_cnt);
56int smi_get_fwd_port(struct ib_smp *smp); 56int smi_get_fwd_port(struct ib_smp *smp);
57extern enum smi_forward_action smi_check_forward_dr_smp(struct ib_smp *smp); 57extern enum smi_forward_action smi_check_forward_dr_smp(struct ib_smp *smp);
58extern enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp, 58extern enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
59 u8 node_type, int port_num); 59 bool is_switch, int port_num);
60 60
61/* 61/*
62 * Return IB_SMI_HANDLE if the SMP should be handled by the local SMA/SM 62 * Return IB_SMI_HANDLE if the SMP should be handled by the local SMA/SM
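The smi.c/smi.h hunks above change the directed-route helpers to take a precomputed bool is_switch instead of re-testing node_type at every C14-9/C14-13 rule. A small sketch of hoisting that test out of the per-SMP path; cap_ib_switch() is a stand-in for rdma_cap_ib_switch() and the enums are illustrative, not the kernel definitions.

#include <stdbool.h>
#include <stdio.h>

enum node_type { NODE_CA = 1, NODE_SWITCH = 2, NODE_ROUTER = 3 };
enum smi_action { SMI_DISCARD, SMI_HANDLE };

struct device { enum node_type node_type; };

/* Evaluate the node type once, up front. */
static bool cap_ib_switch(const struct device *dev)
{
	return dev->node_type == NODE_SWITCH;
}

/* The per-SMP helper only consumes the cached bool. */
static enum smi_action handle_intermediate_hop(bool is_switch)
{
	return is_switch ? SMI_HANDLE : SMI_DISCARD;
}

int main(void)
{
	struct device sw = { NODE_SWITCH }, ca = { NODE_CA };

	printf("switch: %d, ca: %d\n",
	       handle_intermediate_hop(cap_ib_switch(&sw)),
	       handle_intermediate_hop(cap_ib_switch(&ca)));
	return 0;
}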
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index ed6b6c85c334..0b84a9cdfe5b 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -870,7 +870,7 @@ int ib_device_register_sysfs(struct ib_device *device,
870 goto err_put; 870 goto err_put;
871 } 871 }
872 872
873 if (device->node_type == RDMA_NODE_IB_SWITCH) { 873 if (rdma_cap_ib_switch(device)) {
874 ret = add_port(device, 0, port_callback); 874 ret = add_port(device, 0, port_callback);
875 if (ret) 875 if (ret)
876 goto err_put; 876 goto err_put;
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index 62c24b1452b8..009481073644 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -1193,6 +1193,7 @@ static int ib_ucm_close(struct inode *inode, struct file *filp)
1193 return 0; 1193 return 0;
1194} 1194}
1195 1195
1196static DECLARE_BITMAP(overflow_map, IB_UCM_MAX_DEVICES);
1196static void ib_ucm_release_dev(struct device *dev) 1197static void ib_ucm_release_dev(struct device *dev)
1197{ 1198{
1198 struct ib_ucm_device *ucm_dev; 1199 struct ib_ucm_device *ucm_dev;
@@ -1202,7 +1203,7 @@ static void ib_ucm_release_dev(struct device *dev)
1202 if (ucm_dev->devnum < IB_UCM_MAX_DEVICES) 1203 if (ucm_dev->devnum < IB_UCM_MAX_DEVICES)
1203 clear_bit(ucm_dev->devnum, dev_map); 1204 clear_bit(ucm_dev->devnum, dev_map);
1204 else 1205 else
1205 clear_bit(ucm_dev->devnum - IB_UCM_MAX_DEVICES, dev_map); 1206 clear_bit(ucm_dev->devnum - IB_UCM_MAX_DEVICES, overflow_map);
1206 kfree(ucm_dev); 1207 kfree(ucm_dev);
1207} 1208}
1208 1209
@@ -1226,7 +1227,6 @@ static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
1226static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL); 1227static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
1227 1228
1228static dev_t overflow_maj; 1229static dev_t overflow_maj;
1229static DECLARE_BITMAP(overflow_map, IB_UCM_MAX_DEVICES);
1230static int find_overflow_devnum(void) 1230static int find_overflow_devnum(void)
1231{ 1231{
1232 int ret; 1232 int ret;
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index ad45469f7582..29b21213ea75 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -1354,10 +1354,10 @@ static void ucma_lock_files(struct ucma_file *file1, struct ucma_file *file2)
1354 /* Acquire mutex's based on pointer comparison to prevent deadlock. */ 1354 /* Acquire mutex's based on pointer comparison to prevent deadlock. */
1355 if (file1 < file2) { 1355 if (file1 < file2) {
1356 mutex_lock(&file1->mut); 1356 mutex_lock(&file1->mut);
1357 mutex_lock(&file2->mut); 1357 mutex_lock_nested(&file2->mut, SINGLE_DEPTH_NESTING);
1358 } else { 1358 } else {
1359 mutex_lock(&file2->mut); 1359 mutex_lock(&file2->mut);
1360 mutex_lock(&file1->mut); 1360 mutex_lock_nested(&file1->mut, SINGLE_DEPTH_NESTING);
1361 } 1361 }
1362} 1362}
1363 1363
@@ -1616,6 +1616,7 @@ static void __exit ucma_cleanup(void)
1616 device_remove_file(ucma_misc.this_device, &dev_attr_abi_version); 1616 device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
1617 misc_deregister(&ucma_misc); 1617 misc_deregister(&ucma_misc);
1618 idr_destroy(&ctx_idr); 1618 idr_destroy(&ctx_idr);
1619 idr_destroy(&multicast_idr);
1619} 1620}
1620 1621
1621module_init(ucma_init); 1622module_init(ucma_init);
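The ucma_lock_files() hunk above keeps the existing address-ordered double lock and only annotates the inner acquisition with mutex_lock_nested(..., SINGLE_DEPTH_NESTING) so lockdep accepts taking two locks of the same class. A minimal userspace sketch of the same address-ordering rule, using POSIX threads and illustrative names (not part of the patch):

#include <pthread.h>
#include <stdio.h>

struct file_ctx {
	pthread_mutex_t mut;
};

/* Always lock the lower-addressed context first, so two threads locking
 * the same pair in opposite argument order cannot deadlock. */
static void lock_pair(struct file_ctx *a, struct file_ctx *b)
{
	if (a < b) {
		pthread_mutex_lock(&a->mut);
		pthread_mutex_lock(&b->mut);
	} else {
		pthread_mutex_lock(&b->mut);
		pthread_mutex_lock(&a->mut);
	}
}

static void unlock_pair(struct file_ctx *a, struct file_ctx *b)
{
	pthread_mutex_unlock(&a->mut);
	pthread_mutex_unlock(&b->mut);
}

int main(void)
{
	struct file_ctx f1 = { PTHREAD_MUTEX_INITIALIZER };
	struct file_ctx f2 = { PTHREAD_MUTEX_INITIALIZER };

	lock_pair(&f1, &f2);
	printf("both contexts locked in address order\n");
	unlock_pair(&f1, &f2);
	return 0;
}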
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index b1b73232f217..bbbe0184e592 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -736,6 +736,10 @@ static struct ib_mr *iwch_get_dma_mr(struct ib_pd *pd, int acc)
736 /* 736 /*
737 * T3 only supports 32 bits of size. 737 * T3 only supports 32 bits of size.
738 */ 738 */
739 if (sizeof(phys_addr_t) > 4) {
740 pr_warn_once(MOD "Cannot support dma_mrs on this platform.\n");
741 return ERR_PTR(-ENOTSUPP);
742 }
739 bl.size = 0xffffffff; 743 bl.size = 0xffffffff;
740 bl.addr = 0; 744 bl.addr = 0;
741 kva = 0; 745 kva = 0;
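The iwch_get_dma_mr() hunk above refuses DMA memory registrations when physical addresses are wider than the 32 bits T3 can express. A minimal sketch of the same width check; the phys_addr_t typedef is a stand-in for the kernel type and ENOTSUP stands in for the kernel's ENOTSUPP (both assumptions for a userspace build):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;	/* stand-in for the kernel type */

/* Refuse the operation when platform physical addresses cannot fit in
 * the 32 bits the hardware supports. */
static int check_dma_mr_supported(void)
{
	if (sizeof(phys_addr_t) > 4) {
		fprintf(stderr, "cannot support dma_mrs on this platform\n");
		return -ENOTSUP;
	}
	return 0;
}

int main(void)
{
	printf("dma_mr check: %d\n", check_dma_mr_supported());
	return 0;
}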
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index c7aab48f07cd..92d518382a9f 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -814,7 +814,7 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
814 printk(KERN_ERR MOD 814 printk(KERN_ERR MOD
815 "Unexpected cqe_status 0x%x for QPID=0x%0x\n", 815 "Unexpected cqe_status 0x%x for QPID=0x%0x\n",
816 CQE_STATUS(&cqe), CQE_QPID(&cqe)); 816 CQE_STATUS(&cqe), CQE_QPID(&cqe));
817 ret = -EINVAL; 817 wc->status = IB_WC_FATAL_ERR;
818 } 818 }
819 } 819 }
820out: 820out:
diff --git a/drivers/infiniband/hw/ehca/ehca_sqp.c b/drivers/infiniband/hw/ehca/ehca_sqp.c
index 12b5bc23832b..376b031c2c7f 100644
--- a/drivers/infiniband/hw/ehca/ehca_sqp.c
+++ b/drivers/infiniband/hw/ehca/ehca_sqp.c
@@ -226,8 +226,9 @@ int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
226 const struct ib_mad *in_mad = (const struct ib_mad *)in; 226 const struct ib_mad *in_mad = (const struct ib_mad *)in;
227 struct ib_mad *out_mad = (struct ib_mad *)out; 227 struct ib_mad *out_mad = (struct ib_mad *)out;
228 228
229 BUG_ON(in_mad_size != sizeof(*in_mad) || 229 if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
230 *out_mad_size != sizeof(*out_mad)); 230 *out_mad_size != sizeof(*out_mad)))
231 return IB_MAD_RESULT_FAILURE;
231 232
232 if (!port_num || port_num > ibdev->phys_port_cnt || !in_wc) 233 if (!port_num || port_num > ibdev->phys_port_cnt || !in_wc)
233 return IB_MAD_RESULT_FAILURE; 234 return IB_MAD_RESULT_FAILURE;
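Several process_mad() hunks in this series (ehca, ipath, mlx4, mlx5, mthca, ocrdma) make the same change: the size check that used to BUG_ON() now warns once and returns IB_MAD_RESULT_FAILURE, so a malformed MAD no longer brings down the kernel. A userspace sketch of the assert-to-error-return pattern, with a one-shot warning flag standing in for WARN_ON_ONCE() and illustrative names throughout:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define MAD_RESULT_FAILURE (-1)
#define MAD_RESULT_SUCCESS 0

struct mad { unsigned char data[256]; };

/* Validate caller-supplied sizes; warn once and fail instead of aborting. */
static int process_mad(size_t in_size, size_t out_size)
{
	static bool warned;

	if (in_size != sizeof(struct mad) || out_size != sizeof(struct mad)) {
		if (!warned) {
			fprintf(stderr, "process_mad: bad MAD size %zu/%zu\n",
				in_size, out_size);
			warned = true;
		}
		return MAD_RESULT_FAILURE;
	}
	return MAD_RESULT_SUCCESS;
}

int main(void)
{
	printf("%d\n", process_mad(sizeof(struct mad), sizeof(struct mad)));
	printf("%d\n", process_mad(10, sizeof(struct mad)));
	return 0;
}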
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index 2d7e503d13cb..871dbe56216a 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -31,6 +31,8 @@
31 * SOFTWARE. 31 * SOFTWARE.
32 */ 32 */
33 33
34#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
35
34#include <linux/sched.h> 36#include <linux/sched.h>
35#include <linux/spinlock.h> 37#include <linux/spinlock.h>
36#include <linux/idr.h> 38#include <linux/idr.h>
@@ -399,8 +401,8 @@ static int ipath_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
399 u32 bar0 = 0, bar1 = 0; 401 u32 bar0 = 0, bar1 = 0;
400 402
401#ifdef CONFIG_X86_64 403#ifdef CONFIG_X86_64
402 if (WARN(pat_enabled(), 404 if (pat_enabled()) {
403 "ipath needs PAT disabled, boot with nopat kernel parameter\n")) { 405 pr_warn("ipath needs PAT disabled, boot with nopat kernel parameter\n");
404 ret = -ENODEV; 406 ret = -ENODEV;
405 goto bail; 407 goto bail;
406 } 408 }
diff --git a/drivers/infiniband/hw/ipath/ipath_mad.c b/drivers/infiniband/hw/ipath/ipath_mad.c
index 948188e37f95..ad3a926ab3c5 100644
--- a/drivers/infiniband/hw/ipath/ipath_mad.c
+++ b/drivers/infiniband/hw/ipath/ipath_mad.c
@@ -1499,8 +1499,9 @@ int ipath_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
1499 const struct ib_mad *in_mad = (const struct ib_mad *)in; 1499 const struct ib_mad *in_mad = (const struct ib_mad *)in;
1500 struct ib_mad *out_mad = (struct ib_mad *)out; 1500 struct ib_mad *out_mad = (struct ib_mad *)out;
1501 1501
1502 BUG_ON(in_mad_size != sizeof(*in_mad) || 1502 if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
1503 *out_mad_size != sizeof(*out_mad)); 1503 *out_mad_size != sizeof(*out_mad)))
1504 return IB_MAD_RESULT_FAILURE;
1504 1505
1505 switch (in_mad->mad_hdr.mgmt_class) { 1506 switch (in_mad->mad_hdr.mgmt_class) {
1506 case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE: 1507 case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index 48253b839a6f..30ba49c4a98c 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -2044,9 +2044,9 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
2044 2044
2045 spin_lock_init(&idev->qp_table.lock); 2045 spin_lock_init(&idev->qp_table.lock);
2046 spin_lock_init(&idev->lk_table.lock); 2046 spin_lock_init(&idev->lk_table.lock);
2047 idev->sm_lid = __constant_be16_to_cpu(IB_LID_PERMISSIVE); 2047 idev->sm_lid = be16_to_cpu(IB_LID_PERMISSIVE);
2048 /* Set the prefix to the default value (see ch. 4.1.1) */ 2048 /* Set the prefix to the default value (see ch. 4.1.1) */
2049 idev->gid_prefix = __constant_cpu_to_be64(0xfe80000000000000ULL); 2049 idev->gid_prefix = cpu_to_be64(0xfe80000000000000ULL);
2050 2050
2051 ret = ipath_init_qp_table(idev, ib_ipath_qp_table_size); 2051 ret = ipath_init_qp_table(idev, ib_ipath_qp_table_size);
2052 if (ret) 2052 if (ret)
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 85a50df2f203..68b3dfa922bf 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -860,21 +860,31 @@ int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
860 struct mlx4_ib_dev *dev = to_mdev(ibdev); 860 struct mlx4_ib_dev *dev = to_mdev(ibdev);
861 const struct ib_mad *in_mad = (const struct ib_mad *)in; 861 const struct ib_mad *in_mad = (const struct ib_mad *)in;
862 struct ib_mad *out_mad = (struct ib_mad *)out; 862 struct ib_mad *out_mad = (struct ib_mad *)out;
863 enum rdma_link_layer link = rdma_port_get_link_layer(ibdev, port_num);
863 864
864 BUG_ON(in_mad_size != sizeof(*in_mad) || 865 if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
865 *out_mad_size != sizeof(*out_mad)); 866 *out_mad_size != sizeof(*out_mad)))
867 return IB_MAD_RESULT_FAILURE;
866 868
867 switch (rdma_port_get_link_layer(ibdev, port_num)) { 869 /* iboe_process_mad() which uses the HCA flow-counters to implement IB PMA
868 case IB_LINK_LAYER_INFINIBAND: 870 * queries, should be called only by VFs and for that specific purpose
869 if (!mlx4_is_slave(dev->dev)) 871 */
870 return ib_process_mad(ibdev, mad_flags, port_num, in_wc, 872 if (link == IB_LINK_LAYER_INFINIBAND) {
871 in_grh, in_mad, out_mad); 873 if (mlx4_is_slave(dev->dev) &&
872 case IB_LINK_LAYER_ETHERNET: 874 in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
873 return iboe_process_mad(ibdev, mad_flags, port_num, in_wc, 875 in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS)
874 in_grh, in_mad, out_mad); 876 return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
875 default: 877 in_grh, in_mad, out_mad);
876 return -EINVAL; 878
879 return ib_process_mad(ibdev, mad_flags, port_num, in_wc,
880 in_grh, in_mad, out_mad);
877 } 881 }
882
883 if (link == IB_LINK_LAYER_ETHERNET)
884 return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
885 in_grh, in_mad, out_mad);
886
887 return -EINVAL;
878} 888}
879 889
880static void send_handler(struct ib_mad_agent *agent, 890static void send_handler(struct ib_mad_agent *agent,
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 067a691ecbed..8be6db816460 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -253,14 +253,15 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
253 props->hca_core_clock = dev->dev->caps.hca_core_clock * 1000UL; 253 props->hca_core_clock = dev->dev->caps.hca_core_clock * 1000UL;
254 props->timestamp_mask = 0xFFFFFFFFFFFFULL; 254 props->timestamp_mask = 0xFFFFFFFFFFFFULL;
255 255
256 err = mlx4_get_internal_clock_params(dev->dev, &clock_params); 256 if (!mlx4_is_slave(dev->dev))
257 if (err) 257 err = mlx4_get_internal_clock_params(dev->dev, &clock_params);
258 goto out;
259 258
260 if (uhw->outlen >= resp.response_length + sizeof(resp.hca_core_clock_offset)) { 259 if (uhw->outlen >= resp.response_length + sizeof(resp.hca_core_clock_offset)) {
261 resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE;
262 resp.response_length += sizeof(resp.hca_core_clock_offset); 260 resp.response_length += sizeof(resp.hca_core_clock_offset);
263 resp.comp_mask |= QUERY_DEVICE_RESP_MASK_TIMESTAMP; 261 if (!err && !mlx4_is_slave(dev->dev)) {
262 resp.comp_mask |= QUERY_DEVICE_RESP_MASK_TIMESTAMP;
263 resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE;
264 }
264 } 265 }
265 266
266 if (uhw->outlen) { 267 if (uhw->outlen) {
@@ -2669,31 +2670,33 @@ static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
2669 dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC); 2670 dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC);
2670 if (!dm) { 2671 if (!dm) {
2671 pr_err("failed to allocate memory for tunneling qp update\n"); 2672 pr_err("failed to allocate memory for tunneling qp update\n");
2672 goto out; 2673 return;
2673 } 2674 }
2674 2675
2675 for (i = 0; i < ports; i++) { 2676 for (i = 0; i < ports; i++) {
2676 dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC); 2677 dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC);
2677 if (!dm[i]) { 2678 if (!dm[i]) {
2678 pr_err("failed to allocate memory for tunneling qp update work struct\n"); 2679 pr_err("failed to allocate memory for tunneling qp update work struct\n");
2679 for (i = 0; i < dev->caps.num_ports; i++) { 2680 while (--i >= 0)
2680 if (dm[i]) 2681 kfree(dm[i]);
2681 kfree(dm[i]);
2682 }
2683 goto out; 2682 goto out;
2684 } 2683 }
2685 }
2686 /* initialize or tear down tunnel QPs for the slave */
2687 for (i = 0; i < ports; i++) {
2688 INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work); 2684 INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work);
2689 dm[i]->port = first_port + i + 1; 2685 dm[i]->port = first_port + i + 1;
2690 dm[i]->slave = slave; 2686 dm[i]->slave = slave;
2691 dm[i]->do_init = do_init; 2687 dm[i]->do_init = do_init;
2692 dm[i]->dev = ibdev; 2688 dm[i]->dev = ibdev;
2693 spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags); 2689 }
2694 if (!ibdev->sriov.is_going_down) 2690 /* initialize or tear down tunnel QPs for the slave */
2691 spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
2692 if (!ibdev->sriov.is_going_down) {
2693 for (i = 0; i < ports; i++)
2695 queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work); 2694 queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work);
2696 spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags); 2695 spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
2696 } else {
2697 spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
2698 for (i = 0; i < ports; i++)
2699 kfree(dm[i]);
2697 } 2700 }
2698out: 2701out:
2699 kfree(dm); 2702 kfree(dm);
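The do_slave_init() hunk above tightens the error handling: a failed per-port allocation now frees only the entries allocated so far (while (--i >= 0)), and when the device is going down the work items are freed rather than queued. A minimal sketch of the same partial-rollback pattern with malloc/free and illustrative names:

#include <stdlib.h>
#include <string.h>

struct work_item {
	int port;
	int slave;
};

/* Allocate one work item per port; on failure, release only the items
 * that were actually allocated before the failing index. */
static struct work_item **alloc_work_items(int ports)
{
	struct work_item **dm;
	int i;

	dm = calloc(ports, sizeof(*dm));
	if (!dm)
		return NULL;

	for (i = 0; i < ports; i++) {
		dm[i] = malloc(sizeof(*dm[i]));
		if (!dm[i]) {
			while (--i >= 0)	/* roll back earlier items only */
				free(dm[i]);
			free(dm);
			return NULL;
		}
		memset(dm[i], 0, sizeof(*dm[i]));
		dm[i]->port = i + 1;
	}
	return dm;
}

static void free_work_items(struct work_item **dm, int ports)
{
	int i;

	for (i = 0; i < ports; i++)
		free(dm[i]);
	free(dm);
}

int main(void)
{
	struct work_item **dm = alloc_work_items(2);

	if (dm)
		free_work_items(dm, 2);
	return 0;
}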
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index 01fc97db45d6..b84d13a487cc 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -68,8 +68,9 @@ int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
68 const struct ib_mad *in_mad = (const struct ib_mad *)in; 68 const struct ib_mad *in_mad = (const struct ib_mad *)in;
69 struct ib_mad *out_mad = (struct ib_mad *)out; 69 struct ib_mad *out_mad = (struct ib_mad *)out;
70 70
71 BUG_ON(in_mad_size != sizeof(*in_mad) || 71 if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
72 *out_mad_size != sizeof(*out_mad)); 72 *out_mad_size != sizeof(*out_mad)))
73 return IB_MAD_RESULT_FAILURE;
73 74
74 slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE); 75 slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);
75 76
diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c
index 6b2418b74c99..7c3f2fb44ba5 100644
--- a/drivers/infiniband/hw/mthca/mthca_mad.c
+++ b/drivers/infiniband/hw/mthca/mthca_mad.c
@@ -209,8 +209,9 @@ int mthca_process_mad(struct ib_device *ibdev,
209 const struct ib_mad *in_mad = (const struct ib_mad *)in; 209 const struct ib_mad *in_mad = (const struct ib_mad *)in;
210 struct ib_mad *out_mad = (struct ib_mad *)out; 210 struct ib_mad *out_mad = (struct ib_mad *)out;
211 211
212 BUG_ON(in_mad_size != sizeof(*in_mad) || 212 if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
213 *out_mad_size != sizeof(*out_mad)); 213 *out_mad_size != sizeof(*out_mad)))
214 return IB_MAD_RESULT_FAILURE;
214 215
215 /* Forward locally generated traps to the SM */ 216 /* Forward locally generated traps to the SM */
216 if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && 217 if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP &&
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 9047af429906..8a3ad170d790 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -1520,8 +1520,9 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
1520 int rc = arpindex; 1520 int rc = arpindex;
1521 struct net_device *netdev; 1521 struct net_device *netdev;
1522 struct nes_adapter *nesadapter = nesvnic->nesdev->nesadapter; 1522 struct nes_adapter *nesadapter = nesvnic->nesdev->nesadapter;
1523 __be32 dst_ipaddr = htonl(dst_ip);
1523 1524
1524 rt = ip_route_output(&init_net, htonl(dst_ip), 0, 0, 0); 1525 rt = ip_route_output(&init_net, dst_ipaddr, nesvnic->local_ipaddr, 0, 0);
1525 if (IS_ERR(rt)) { 1526 if (IS_ERR(rt)) {
1526 printk(KERN_ERR "%s: ip_route_output_key failed for 0x%08X\n", 1527 printk(KERN_ERR "%s: ip_route_output_key failed for 0x%08X\n",
1527 __func__, dst_ip); 1528 __func__, dst_ip);
@@ -1533,7 +1534,7 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
1533 else 1534 else
1534 netdev = nesvnic->netdev; 1535 netdev = nesvnic->netdev;
1535 1536
1536 neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, netdev); 1537 neigh = dst_neigh_lookup(&rt->dst, &dst_ipaddr);
1537 1538
1538 rcu_read_lock(); 1539 rcu_read_lock();
1539 if (neigh) { 1540 if (neigh) {
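The nes_addr_resolve_neigh() hunk above converts the destination once into a big-endian local variable and reuses it for both the route lookup (now also passing the local source address) and dst_neigh_lookup(), replacing the bare arp_tbl lookup on rt->rt_gateway. A small sketch of the host-to-network conversion that the new dst_ipaddr variable represents, using uint32_t in place of __be32 (an assumption for a userspace build):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t dst_ip = 0xC0A80001;		/* 192.168.0.1 in host order */
	uint32_t dst_ipaddr = htonl(dst_ip);	/* network (big-endian) order */
	struct in_addr in = { .s_addr = dst_ipaddr };

	/* inet_ntoa() expects network byte order, so the converted value
	 * prints as the intended dotted quad on any endianness. */
	printf("host 0x%08X -> %s\n", (unsigned)dst_ip, inet_ntoa(in));
	return 0;
}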
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 02120d340d50..4713dd7ed764 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -3861,7 +3861,7 @@ void nes_manage_arp_cache(struct net_device *netdev, unsigned char *mac_addr,
3861 (((u32)mac_addr[2]) << 24) | (((u32)mac_addr[3]) << 16) | 3861 (((u32)mac_addr[2]) << 24) | (((u32)mac_addr[3]) << 16) |
3862 (((u32)mac_addr[4]) << 8) | (u32)mac_addr[5]); 3862 (((u32)mac_addr[4]) << 8) | (u32)mac_addr[5]);
3863 cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_HIGH_IDX] = cpu_to_le32( 3863 cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_HIGH_IDX] = cpu_to_le32(
3864 (((u32)mac_addr[0]) << 16) | (u32)mac_addr[1]); 3864 (((u32)mac_addr[0]) << 8) | (u32)mac_addr[1]);
3865 } else { 3865 } else {
3866 cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_ADDR_LOW_IDX] = 0; 3866 cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_ADDR_LOW_IDX] = 0;
3867 cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_HIGH_IDX] = 0; 3867 cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_HIGH_IDX] = 0;
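The nes_manage_arp_cache() hunk above fixes the high word of the MAC address: the first octet belongs at bit 8, not bit 16, so the two CQP words carry mac[2..5] and mac[0..1] respectively. A small standalone sketch of that packing (function and variable names are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Pack a 6-byte MAC into the two 32-bit words the ARP WQE expects:
 * low word = mac[2..5], high word = mac[0..1]. */
static void pack_mac(const uint8_t mac[6], uint32_t *low, uint32_t *high)
{
	*low  = ((uint32_t)mac[2] << 24) | ((uint32_t)mac[3] << 16) |
		((uint32_t)mac[4] << 8)  |  (uint32_t)mac[5];
	*high = ((uint32_t)mac[0] << 8)  |  (uint32_t)mac[1];	/* was << 16 */
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x1b, 0x21, 0x3c, 0x4d, 0x5e };
	uint32_t low, high;

	pack_mac(mac, &low, &high);
	printf("low=0x%08X high=0x%04X\n", (unsigned)low, (unsigned)high);
	return 0;
}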
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h
index b396344fae16..6a36338593cd 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma.h
@@ -1,21 +1,36 @@
1/******************************************************************* 1/* This file is part of the Emulex RoCE Device Driver for
2 * This file is part of the Emulex RoCE Device Driver for * 2 * RoCE (RDMA over Converged Ethernet) adapters.
3 * RoCE (RDMA over Converged Ethernet) adapters. * 3 * Copyright (C) 2012-2015 Emulex. All rights reserved.
4 * Copyright (C) 2008-2012 Emulex. All rights reserved. * 4 * EMULEX and SLI are trademarks of Emulex.
5 * EMULEX and SLI are trademarks of Emulex. * 5 * www.emulex.com
6 * www.emulex.com * 6 *
7 * * 7 * This software is available to you under a choice of one of two licenses.
8 * This program is free software; you can redistribute it and/or * 8 * You may choose to be licensed under the terms of the GNU General Public
9 * modify it under the terms of version 2 of the GNU General * 9 * License (GPL) Version 2, available from the file COPYING in the main
10 * Public License as published by the Free Software Foundation. * 10 * directory of this source tree, or the BSD license below:
11 * This program is distributed in the hope that it will be useful. * 11 *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 12 * Redistribution and use in source and binary forms, with or without
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 13 * modification, are permitted provided that the following conditions
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 14 * are met:
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 15 *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for * 16 * - Redistributions of source code must retain the above copyright notice,
17 * more details, a copy of which can be found in the file COPYING * 17 * this list of conditions and the following disclaimer.
18 * included with this package. * 18 *
19 * - Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the distribution.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
30 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
32 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
33 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
19 * 34 *
20 * Contact Information: 35 * Contact Information:
21 * linux-drivers@emulex.com 36 * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
23 * Emulex 38 * Emulex
24 * 3333 Susan Street 39 * 3333 Susan Street
25 * Costa Mesa, CA 92626 40 * Costa Mesa, CA 92626
26 *******************************************************************/ 41 */
27 42
28#ifndef __OCRDMA_H__ 43#ifndef __OCRDMA_H__
29#define __OCRDMA_H__ 44#define __OCRDMA_H__
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_abi.h b/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
index 1554cca5712a..430b1350fe96 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
@@ -1,21 +1,36 @@
1/******************************************************************* 1/* This file is part of the Emulex RoCE Device Driver for
2 * This file is part of the Emulex RoCE Device Driver for * 2 * RoCE (RDMA over Converged Ethernet) adapters.
3 * RoCE (RDMA over Converged Ethernet) adapters. * 3 * Copyright (C) 2012-2015 Emulex. All rights reserved.
4 * Copyright (C) 2008-2012 Emulex. All rights reserved. * 4 * EMULEX and SLI are trademarks of Emulex.
5 * EMULEX and SLI are trademarks of Emulex. * 5 * www.emulex.com
6 * www.emulex.com * 6 *
7 * * 7 * This software is available to you under a choice of one of two licenses.
8 * This program is free software; you can redistribute it and/or * 8 * You may choose to be licensed under the terms of the GNU General Public
9 * modify it under the terms of version 2 of the GNU General * 9 * License (GPL) Version 2, available from the file COPYING in the main
10 * Public License as published by the Free Software Foundation. * 10 * directory of this source tree, or the BSD license below:
11 * This program is distributed in the hope that it will be useful. * 11 *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 12 * Redistribution and use in source and binary forms, with or without
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 13 * modification, are permitted provided that the following conditions
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 14 * are met:
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 15 *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for * 16 * - Redistributions of source code must retain the above copyright notice,
17 * more details, a copy of which can be found in the file COPYING * 17 * this list of conditions and the following disclaimer.
18 * included with this package. * 18 *
19 * - Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the distribution.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
30 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
32 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
33 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
19 * 34 *
20 * Contact Information: 35 * Contact Information:
21 * linux-drivers@emulex.com 36 * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
23 * Emulex 38 * Emulex
24 * 3333 Susan Street 39 * 3333 Susan Street
25 * Costa Mesa, CA 92626 40 * Costa Mesa, CA 92626
26 *******************************************************************/ 41 */
27 42
28#ifndef __OCRDMA_ABI_H__ 43#ifndef __OCRDMA_ABI_H__
29#define __OCRDMA_ABI_H__ 44#define __OCRDMA_ABI_H__
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
index 4bafa15708d0..44766fee1f4e 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
@@ -1,21 +1,36 @@
1/******************************************************************* 1/* This file is part of the Emulex RoCE Device Driver for
2 * This file is part of the Emulex RoCE Device Driver for * 2 * RoCE (RDMA over Converged Ethernet) adapters.
3 * RoCE (RDMA over Converged Ethernet) adapters. * 3 * Copyright (C) 2012-2015 Emulex. All rights reserved.
4 * Copyright (C) 2008-2012 Emulex. All rights reserved. * 4 * EMULEX and SLI are trademarks of Emulex.
5 * EMULEX and SLI are trademarks of Emulex. * 5 * www.emulex.com
6 * www.emulex.com * 6 *
7 * * 7 * This software is available to you under a choice of one of two licenses.
8 * This program is free software; you can redistribute it and/or * 8 * You may choose to be licensed under the terms of the GNU General Public
9 * modify it under the terms of version 2 of the GNU General * 9 * License (GPL) Version 2, available from the file COPYING in the main
10 * Public License as published by the Free Software Foundation. * 10 * directory of this source tree, or the BSD license below:
11 * This program is distributed in the hope that it will be useful. * 11 *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 12 * Redistribution and use in source and binary forms, with or without
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 13 * modification, are permitted provided that the following conditions
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 14 * are met:
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 15 *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for * 16 * - Redistributions of source code must retain the above copyright notice,
17 * more details, a copy of which can be found in the file COPYING * 17 * this list of conditions and the following disclaimer.
18 * included with this package. * 18 *
19 * - Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the distribution.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
30 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
32 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
33 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
19 * 34 *
20 * Contact Information: 35 * Contact Information:
21 * linux-drivers@emulex.com 36 * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
23 * Emulex 38 * Emulex
24 * 3333 Susan Street 39 * 3333 Susan Street
25 * Costa Mesa, CA 92626 40 * Costa Mesa, CA 92626
26 *******************************************************************/ 41 */
27 42
28#include <net/neighbour.h> 43#include <net/neighbour.h>
29#include <net/netevent.h> 44#include <net/netevent.h>
@@ -215,8 +230,9 @@ int ocrdma_process_mad(struct ib_device *ibdev,
215 const struct ib_mad *in_mad = (const struct ib_mad *)in; 230 const struct ib_mad *in_mad = (const struct ib_mad *)in;
216 struct ib_mad *out_mad = (struct ib_mad *)out; 231 struct ib_mad *out_mad = (struct ib_mad *)out;
217 232
218 BUG_ON(in_mad_size != sizeof(*in_mad) || 233 if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
219 *out_mad_size != sizeof(*out_mad)); 234 *out_mad_size != sizeof(*out_mad)))
235 return IB_MAD_RESULT_FAILURE;
220 236
221 switch (in_mad->mad_hdr.mgmt_class) { 237 switch (in_mad->mad_hdr.mgmt_class) {
222 case IB_MGMT_CLASS_PERF_MGMT: 238 case IB_MGMT_CLASS_PERF_MGMT:
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
index cf366fe03cb8..04a30ae67473 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
@@ -1,21 +1,36 @@
1/******************************************************************* 1/* This file is part of the Emulex RoCE Device Driver for
2 * This file is part of the Emulex RoCE Device Driver for * 2 * RoCE (RDMA over Converged Ethernet) adapters.
3 * RoCE (RDMA over Converged Ethernet) adapters. * 3 * Copyright (C) 2012-2015 Emulex. All rights reserved.
4 * Copyright (C) 2008-2012 Emulex. All rights reserved. * 4 * EMULEX and SLI are trademarks of Emulex.
5 * EMULEX and SLI are trademarks of Emulex. * 5 * www.emulex.com
6 * www.emulex.com * 6 *
7 * * 7 * This software is available to you under a choice of one of two licenses.
8 * This program is free software; you can redistribute it and/or * 8 * You may choose to be licensed under the terms of the GNU General Public
9 * modify it under the terms of version 2 of the GNU General * 9 * License (GPL) Version 2, available from the file COPYING in the main
10 * Public License as published by the Free Software Foundation. * 10 * directory of this source tree, or the BSD license below:
11 * This program is distributed in the hope that it will be useful. * 11 *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 12 * Redistribution and use in source and binary forms, with or without
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 13 * modification, are permitted provided that the following conditions
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 14 * are met:
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 15 *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for * 16 * - Redistributions of source code must retain the above copyright notice,
17 * more details, a copy of which can be found in the file COPYING * 17 * this list of conditions and the following disclaimer.
18 * included with this package. * 18 *
19 * - Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the distribution.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
30 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
32 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
33 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
19 * 34 *
20 * Contact Information: 35 * Contact Information:
21 * linux-drivers@emulex.com 36 * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
23 * Emulex 38 * Emulex
24 * 3333 Susan Street 39 * 3333 Susan Street
25 * Costa Mesa, CA 92626 40 * Costa Mesa, CA 92626
26 *******************************************************************/ 41 */
27 42
28#ifndef __OCRDMA_AH_H__ 43#ifndef __OCRDMA_AH_H__
29#define __OCRDMA_AH_H__ 44#define __OCRDMA_AH_H__
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 47615ff33bc6..aab391a15db4 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -1,21 +1,36 @@
1/******************************************************************* 1/* This file is part of the Emulex RoCE Device Driver for
2 * This file is part of the Emulex RoCE Device Driver for * 2 * RoCE (RDMA over Converged Ethernet) adapters.
3 * RoCE (RDMA over Converged Ethernet) CNA Adapters. * 3 * Copyright (C) 2012-2015 Emulex. All rights reserved.
4 * Copyright (C) 2008-2012 Emulex. All rights reserved. * 4 * EMULEX and SLI are trademarks of Emulex.
5 * EMULEX and SLI are trademarks of Emulex. * 5 * www.emulex.com
6 * www.emulex.com * 6 *
7 * * 7 * This software is available to you under a choice of one of two licenses.
8 * This program is free software; you can redistribute it and/or * 8 * You may choose to be licensed under the terms of the GNU General Public
9 * modify it under the terms of version 2 of the GNU General * 9 * License (GPL) Version 2, available from the file COPYING in the main
10 * Public License as published by the Free Software Foundation. * 10 * directory of this source tree, or the BSD license below:
11 * This program is distributed in the hope that it will be useful. * 11 *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 12 * Redistribution and use in source and binary forms, with or without
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 13 * modification, are permitted provided that the following conditions
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 14 * are met:
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 15 *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for * 16 * - Redistributions of source code must retain the above copyright notice,
17 * more details, a copy of which can be found in the file COPYING * 17 * this list of conditions and the following disclaimer.
18 * included with this package. * 18 *
19 * - Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the distribution.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
30 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
32 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
33 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
19 * 34 *
20 * Contact Information: 35 * Contact Information:
21 * linux-drivers@emulex.com 36 * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
23 * Emulex 38 * Emulex
24 * 3333 Susan Street 39 * 3333 Susan Street
25 * Costa Mesa, CA 92626 40 * Costa Mesa, CA 92626
26 *******************************************************************/ 41 */
27 42
28#include <linux/sched.h> 43#include <linux/sched.h>
29#include <linux/interrupt.h> 44#include <linux/interrupt.h>
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
index e905972fceb7..7ed885c1851e 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
@@ -1,21 +1,36 @@
1/******************************************************************* 1/* This file is part of the Emulex RoCE Device Driver for
2 * This file is part of the Emulex RoCE Device Driver for * 2 * RoCE (RDMA over Converged Ethernet) adapters.
3 * RoCE (RDMA over Converged Ethernet) CNA Adapters. * 3 * Copyright (C) 2012-2015 Emulex. All rights reserved.
4 * Copyright (C) 2008-2012 Emulex. All rights reserved. * 4 * EMULEX and SLI are trademarks of Emulex.
5 * EMULEX and SLI are trademarks of Emulex. * 5 * www.emulex.com
6 * www.emulex.com * 6 *
7 * * 7 * This software is available to you under a choice of one of two licenses.
8 * This program is free software; you can redistribute it and/or * 8 * You may choose to be licensed under the terms of the GNU General Public
9 * modify it under the terms of version 2 of the GNU General * 9 * License (GPL) Version 2, available from the file COPYING in the main
10 * Public License as published by the Free Software Foundation. * 10 * directory of this source tree, or the BSD license below:
11 * This program is distributed in the hope that it will be useful. * 11 *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 12 * Redistribution and use in source and binary forms, with or without
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 13 * modification, are permitted provided that the following conditions
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 14 * are met:
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 15 *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for * 16 * - Redistributions of source code must retain the above copyright notice,
17 * more details, a copy of which can be found in the file COPYING * 17 * this list of conditions and the following disclaimer.
18 * included with this package. * 18 *
19 * - Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the distribution.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
30 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
32 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
33 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
19 * 34 *
20 * Contact Information: 35 * Contact Information:
21 * linux-drivers@emulex.com 36 * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
23 * Emulex 38 * Emulex
24 * 3333 Susan Street 39 * 3333 Susan Street
25 * Costa Mesa, CA 92626 40 * Costa Mesa, CA 92626
26 *******************************************************************/ 41 */
27 42
28#ifndef __OCRDMA_HW_H__ 43#ifndef __OCRDMA_HW_H__
29#define __OCRDMA_HW_H__ 44#define __OCRDMA_HW_H__
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index 8a1398b253a2..b119a3413a15 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -1,21 +1,36 @@
1/******************************************************************* 1/* This file is part of the Emulex RoCE Device Driver for
2 * This file is part of the Emulex RoCE Device Driver for * 2 * RoCE (RDMA over Converged Ethernet) adapters.
3 * RoCE (RDMA over Converged Ethernet) adapters. * 3 * Copyright (C) 2012-2015 Emulex. All rights reserved.
4 * Copyright (C) 2008-2012 Emulex. All rights reserved. * 4 * EMULEX and SLI are trademarks of Emulex.
5 * EMULEX and SLI are trademarks of Emulex. * 5 * www.emulex.com
6 * www.emulex.com * 6 *
7 * * 7 * This software is available to you under a choice of one of two licenses.
8 * This program is free software; you can redistribute it and/or * 8 * You may choose to be licensed under the terms of the GNU General Public
9 * modify it under the terms of version 2 of the GNU General * 9 * License (GPL) Version 2, available from the file COPYING in the main
10 * Public License as published by the Free Software Foundation. * 10 * directory of this source tree, or the BSD license below:
11 * This program is distributed in the hope that it will be useful. * 11 *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 12 * Redistribution and use in source and binary forms, with or without
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 13 * modification, are permitted provided that the following conditions
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 14 * are met:
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 15 *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for * 16 * - Redistributions of source code must retain the above copyright notice,
17 * more details, a copy of which can be found in the file COPYING * 17 * this list of conditions and the following disclaimer.
18 * included with this package. * 18 *
19 * - Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the distribution.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
30 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
32 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
33 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
19 * 34 *
20 * Contact Information: 35 * Contact Information:
21 * linux-drivers@emulex.com 36 * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
23 * Emulex 38 * Emulex
24 * 3333 Susan Street 39 * 3333 Susan Street
25 * Costa Mesa, CA 92626 40 * Costa Mesa, CA 92626
26 *******************************************************************/ 41 */
27 42
28#include <linux/module.h> 43#include <linux/module.h>
29#include <linux/idr.h> 44#include <linux/idr.h>
@@ -46,7 +61,7 @@
46MODULE_VERSION(OCRDMA_ROCE_DRV_VERSION); 61MODULE_VERSION(OCRDMA_ROCE_DRV_VERSION);
47MODULE_DESCRIPTION(OCRDMA_ROCE_DRV_DESC " " OCRDMA_ROCE_DRV_VERSION); 62MODULE_DESCRIPTION(OCRDMA_ROCE_DRV_DESC " " OCRDMA_ROCE_DRV_VERSION);
48MODULE_AUTHOR("Emulex Corporation"); 63MODULE_AUTHOR("Emulex Corporation");
49MODULE_LICENSE("GPL"); 64MODULE_LICENSE("Dual BSD/GPL");
50 65
51static LIST_HEAD(ocrdma_dev_list); 66static LIST_HEAD(ocrdma_dev_list);
52static DEFINE_SPINLOCK(ocrdma_devlist_lock); 67static DEFINE_SPINLOCK(ocrdma_devlist_lock);
@@ -696,6 +711,7 @@ static void __exit ocrdma_exit_module(void)
696 ocrdma_unregister_inet6addr_notifier(); 711 ocrdma_unregister_inet6addr_notifier();
697 ocrdma_unregister_inetaddr_notifier(); 712 ocrdma_unregister_inetaddr_notifier();
698 ocrdma_rem_debugfs(); 713 ocrdma_rem_debugfs();
714 idr_destroy(&ocrdma_dev_id);
699} 715}
700 716
701module_init(ocrdma_init_module); 717module_init(ocrdma_init_module);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
index 02ad0aee99af..80006b24aa11 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
@@ -1,21 +1,36 @@
1/******************************************************************* 1/* This file is part of the Emulex RoCE Device Driver for
2 * This file is part of the Emulex RoCE Device Driver for * 2 * RoCE (RDMA over Converged Ethernet) adapters.
3 * RoCE (RDMA over Converged Ethernet) adapters. * 3 * Copyright (C) 2012-2015 Emulex. All rights reserved.
4 * Copyright (C) 2008-2012 Emulex. All rights reserved. * 4 * EMULEX and SLI are trademarks of Emulex.
5 * EMULEX and SLI are trademarks of Emulex. * 5 * www.emulex.com
6 * www.emulex.com * 6 *
7 * * 7 * This software is available to you under a choice of one of two licenses.
8 * This program is free software; you can redistribute it and/or * 8 * You may choose to be licensed under the terms of the GNU General Public
9 * modify it under the terms of version 2 of the GNU General * 9 * License (GPL) Version 2, available from the file COPYING in the main
10 * Public License as published by the Free Software Foundation. * 10 * directory of this source tree, or the BSD license below:
11 * This program is distributed in the hope that it will be useful. * 11 *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 12 * Redistribution and use in source and binary forms, with or without
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 13 * modification, are permitted provided that the following conditions
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 14 * are met:
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 15 *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for * 16 * - Redistributions of source code must retain the above copyright notice,
17 * more details, a copy of which can be found in the file COPYING * 17 * this list of conditions and the following disclaimer.
18 * included with this package. * 18 *
19 * - Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the distribution.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
30 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
32 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
33 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
19 * 34 *
20 * Contact Information: 35 * Contact Information:
21 * linux-drivers@emulex.com 36 * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
23 * Emulex 38 * Emulex
24 * 3333 Susan Street 39 * 3333 Susan Street
25 * Costa Mesa, CA 92626 40 * Costa Mesa, CA 92626
26 *******************************************************************/ 41 */
27 42
28#ifndef __OCRDMA_SLI_H__ 43#ifndef __OCRDMA_SLI_H__
29#define __OCRDMA_SLI_H__ 44#define __OCRDMA_SLI_H__
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
index 48d7ef51aa0c..69334e214571 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
@@ -1,21 +1,36 @@
1/******************************************************************* 1/* This file is part of the Emulex RoCE Device Driver for
2 * This file is part of the Emulex RoCE Device Driver for * 2 * RoCE (RDMA over Converged Ethernet) adapters.
3 * RoCE (RDMA over Converged Ethernet) adapters. * 3 * Copyright (C) 2012-2015 Emulex. All rights reserved.
4 * Copyright (C) 2008-2014 Emulex. All rights reserved. * 4 * EMULEX and SLI are trademarks of Emulex.
5 * EMULEX and SLI are trademarks of Emulex. * 5 * www.emulex.com
6 * www.emulex.com * 6 *
7 * * 7 * This software is available to you under a choice of one of two licenses.
8 * This program is free software; you can redistribute it and/or * 8 * You may choose to be licensed under the terms of the GNU General Public
9 * modify it under the terms of version 2 of the GNU General * 9 * License (GPL) Version 2, available from the file COPYING in the main
10 * Public License as published by the Free Software Foundation. * 10 * directory of this source tree, or the BSD license below:
11 * This program is distributed in the hope that it will be useful. * 11 *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 12 * Redistribution and use in source and binary forms, with or without
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 13 * modification, are permitted provided that the following conditions
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 14 * are met:
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 15 *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for * 16 * - Redistributions of source code must retain the above copyright notice,
17 * more details, a copy of which can be found in the file COPYING * 17 * this list of conditions and the following disclaimer.
18 * included with this package. * 18 *
19 * - Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the distribution.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
30 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
32 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
33 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
19 * 34 *
20 * Contact Information: 35 * Contact Information:
21 * linux-drivers@emulex.com 36 * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
23 * Emulex 38 * Emulex
24 * 3333 Susan Street 39 * 3333 Susan Street
25 * Costa Mesa, CA 92626 40 * Costa Mesa, CA 92626
26 *******************************************************************/ 41 */
27 42
28#include <rdma/ib_addr.h> 43#include <rdma/ib_addr.h>
29#include <rdma/ib_pma.h> 44#include <rdma/ib_pma.h>
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.h b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
index 091edd68a8a3..c9e58d04c7b8 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
@@ -1,21 +1,36 @@
1/******************************************************************* 1/* This file is part of the Emulex RoCE Device Driver for
2 * This file is part of the Emulex RoCE Device Driver for * 2 * RoCE (RDMA over Converged Ethernet) adapters.
3 * RoCE (RDMA over Converged Ethernet) adapters. * 3 * Copyright (C) 2012-2015 Emulex. All rights reserved.
4 * Copyright (C) 2008-2014 Emulex. All rights reserved. * 4 * EMULEX and SLI are trademarks of Emulex.
5 * EMULEX and SLI are trademarks of Emulex. * 5 * www.emulex.com
6 * www.emulex.com * 6 *
7 * * 7 * This software is available to you under a choice of one of two licenses.
8 * This program is free software; you can redistribute it and/or * 8 * You may choose to be licensed under the terms of the GNU General Public
9 * modify it under the terms of version 2 of the GNU General * 9 * License (GPL) Version 2, available from the file COPYING in the main
10 * Public License as published by the Free Software Foundation. * 10 * directory of this source tree, or the BSD license below:
11 * This program is distributed in the hope that it will be useful. * 11 *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 12 * Redistribution and use in source and binary forms, with or without
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 13 * modification, are permitted provided that the following conditions
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 14 * are met:
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 15 *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for * 16 * - Redistributions of source code must retain the above copyright notice,
17 * more details, a copy of which can be found in the file COPYING * 17 * this list of conditions and the following disclaimer.
18 * included with this package. * 18 *
19 * - Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the distribution.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
30 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
32 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
33 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
19 * 34 *
20 * Contact Information: 35 * Contact Information:
21 * linux-drivers@emulex.com 36 * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
23 * Emulex 38 * Emulex
24 * 3333 Susan Street 39 * 3333 Susan Street
25 * Costa Mesa, CA 92626 40 * Costa Mesa, CA 92626
26 *******************************************************************/ 41 */
27 42
28#ifndef __OCRDMA_STATS_H__ 43#ifndef __OCRDMA_STATS_H__
29#define __OCRDMA_STATS_H__ 44#define __OCRDMA_STATS_H__
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 5bb61eb58f2c..bc84cd462ecf 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -1,21 +1,36 @@
1/******************************************************************* 1/* This file is part of the Emulex RoCE Device Driver for
2 * This file is part of the Emulex RoCE Device Driver for * 2 * RoCE (RDMA over Converged Ethernet) adapters.
3 * RoCE (RDMA over Converged Ethernet) adapters. * 3 * Copyright (C) 2012-2015 Emulex. All rights reserved.
4 * Copyright (C) 2008-2012 Emulex. All rights reserved. * 4 * EMULEX and SLI are trademarks of Emulex.
5 * EMULEX and SLI are trademarks of Emulex. * 5 * www.emulex.com
6 * www.emulex.com * 6 *
7 * * 7 * This software is available to you under a choice of one of two licenses.
8 * This program is free software; you can redistribute it and/or * 8 * You may choose to be licensed under the terms of the GNU General Public
9 * modify it under the terms of version 2 of the GNU General * 9 * License (GPL) Version 2, available from the file COPYING in the main
10 * Public License as published by the Free Software Foundation. * 10 * directory of this source tree, or the BSD license below:
11 * This program is distributed in the hope that it will be useful. * 11 *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 12 * Redistribution and use in source and binary forms, with or without
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 13 * modification, are permitted provided that the following conditions
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 14 * are met:
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 15 *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for * 16 * - Redistributions of source code must retain the above copyright notice,
17 * more details, a copy of which can be found in the file COPYING * 17 * this list of conditions and the following disclaimer.
18 * included with this package. * 18 *
19 * - Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the distribution.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
30 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
32 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
33 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
19 * 34 *
20 * Contact Information: 35 * Contact Information:
21 * linux-drivers@emulex.com 36 * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
23 * Emulex 38 * Emulex
24 * 3333 Susan Street 39 * 3333 Susan Street
25 * Costa Mesa, CA 92626 40 * Costa Mesa, CA 92626
26 *******************************************************************/ 41 */
27 42
28#include <linux/dma-mapping.h> 43#include <linux/dma-mapping.h>
29#include <rdma/ib_verbs.h> 44#include <rdma/ib_verbs.h>
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
index b15c608efa7b..eaccb2d3cb9f 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
@@ -1,21 +1,36 @@
1/******************************************************************* 1/* This file is part of the Emulex RoCE Device Driver for
2 * This file is part of the Emulex RoCE Device Driver for * 2 * RoCE (RDMA over Converged Ethernet) adapters.
3 * RoCE (RDMA over Converged Ethernet) adapters. * 3 * Copyright (C) 2012-2015 Emulex. All rights reserved.
4 * Copyright (C) 2008-2012 Emulex. All rights reserved. * 4 * EMULEX and SLI are trademarks of Emulex.
5 * EMULEX and SLI are trademarks of Emulex. * 5 * www.emulex.com
6 * www.emulex.com * 6 *
7 * * 7 * This software is available to you under a choice of one of two licenses.
8 * This program is free software; you can redistribute it and/or * 8 * You may choose to be licensed under the terms of the GNU General Public
9 * modify it under the terms of version 2 of the GNU General * 9 * License (GPL) Version 2, available from the file COPYING in the main
10 * Public License as published by the Free Software Foundation. * 10 * directory of this source tree, or the BSD license below:
11 * This program is distributed in the hope that it will be useful. * 11 *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 12 * Redistribution and use in source and binary forms, with or without
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 13 * modification, are permitted provided that the following conditions
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 14 * are met:
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 15 *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for * 16 * - Redistributions of source code must retain the above copyright notice,
17 * more details, a copy of which can be found in the file COPYING * 17 * this list of conditions and the following disclaimer.
18 * included with this package. * 18 *
19 * - Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the distribution.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
30 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
32 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
33 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
19 * 34 *
20 * Contact Information: 35 * Contact Information:
21 * linux-drivers@emulex.com 36 * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
23 * Emulex 38 * Emulex
24 * 3333 Susan Street 39 * 3333 Susan Street
25 * Costa Mesa, CA 92626 40 * Costa Mesa, CA 92626
26 *******************************************************************/ 41 */
27 42
28#ifndef __OCRDMA_VERBS_H__ 43#ifndef __OCRDMA_VERBS_H__
29#define __OCRDMA_VERBS_H__ 44#define __OCRDMA_VERBS_H__
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
index 05e3242d8442..9625e7c438e5 100644
--- a/drivers/infiniband/hw/qib/qib_mad.c
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -2412,8 +2412,9 @@ int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
2412 const struct ib_mad *in_mad = (const struct ib_mad *)in; 2412 const struct ib_mad *in_mad = (const struct ib_mad *)in;
2413 struct ib_mad *out_mad = (struct ib_mad *)out; 2413 struct ib_mad *out_mad = (struct ib_mad *)out;
2414 2414
2415 BUG_ON(in_mad_size != sizeof(*in_mad) || 2415 if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
2416 *out_mad_size != sizeof(*out_mad)); 2416 *out_mad_size != sizeof(*out_mad)))
2417 return IB_MAD_RESULT_FAILURE;
2417 2418
2418 switch (in_mad->mad_hdr.mgmt_class) { 2419 switch (in_mad->mad_hdr.mgmt_class) {
2419 case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE: 2420 case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
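
The qib_process_mad() hunk above trades a fatal BUG_ON() for WARN_ON_ONCE() plus an error return, so a mis-sized MAD now logs one warning and fails the request instead of panicking the host. A minimal sketch of the same pattern, using a hypothetical validation helper that is not part of the patch:

	static int validate_mad_sizes(size_t in_mad_size, size_t out_mad_size)
	{
		/* Warn once, then fail the request instead of crashing. */
		if (WARN_ON_ONCE(in_mad_size != sizeof(struct ib_mad) ||
				 out_mad_size != sizeof(struct ib_mad)))
			return IB_MAD_RESULT_FAILURE;

		return IB_MAD_RESULT_SUCCESS;
	}
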
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index bd94b0a6e9e5..79859c4d43c9 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -239,7 +239,7 @@ struct ipoib_cm_tx {
239 struct net_device *dev; 239 struct net_device *dev;
240 struct ipoib_neigh *neigh; 240 struct ipoib_neigh *neigh;
241 struct ipoib_path *path; 241 struct ipoib_path *path;
242 struct ipoib_cm_tx_buf *tx_ring; 242 struct ipoib_tx_buf *tx_ring;
243 unsigned tx_head; 243 unsigned tx_head;
244 unsigned tx_tail; 244 unsigned tx_tail;
245 unsigned long flags; 245 unsigned long flags;
@@ -504,6 +504,33 @@ int ipoib_mcast_stop_thread(struct net_device *dev);
504void ipoib_mcast_dev_down(struct net_device *dev); 504void ipoib_mcast_dev_down(struct net_device *dev);
505void ipoib_mcast_dev_flush(struct net_device *dev); 505void ipoib_mcast_dev_flush(struct net_device *dev);
506 506
507int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req);
508void ipoib_dma_unmap_tx(struct ipoib_dev_priv *priv,
509 struct ipoib_tx_buf *tx_req);
510
511static inline void ipoib_build_sge(struct ipoib_dev_priv *priv,
512 struct ipoib_tx_buf *tx_req)
513{
514 int i, off;
515 struct sk_buff *skb = tx_req->skb;
516 skb_frag_t *frags = skb_shinfo(skb)->frags;
517 int nr_frags = skb_shinfo(skb)->nr_frags;
518 u64 *mapping = tx_req->mapping;
519
520 if (skb_headlen(skb)) {
521 priv->tx_sge[0].addr = mapping[0];
522 priv->tx_sge[0].length = skb_headlen(skb);
523 off = 1;
524 } else
525 off = 0;
526
527 for (i = 0; i < nr_frags; ++i) {
528 priv->tx_sge[i + off].addr = mapping[i + off];
529 priv->tx_sge[i + off].length = skb_frag_size(&frags[i]);
530 }
531 priv->tx_wr.num_sge = nr_frags + off;
532}
533
507#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG 534#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
508struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct net_device *dev); 535struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct net_device *dev);
509int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter); 536int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter);
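
The new ipoib_build_sge() inline above is what lets the datagram and connected-mode send paths share one gather-list builder: SGE 0 covers the linear skb head when there is one, the remaining SGEs cover the page fragments, and num_sge is set to match. A hedged sketch of how a send path is expected to combine the two newly exported helpers (mirroring the ipoib_cm.c changes further down; error handling trimmed, surrounding variables assumed):

	/* tx_req->skb is already set; field names follow the driver's. */
	if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req)))
		return;				/* DMA mapping failed */

	ipoib_build_sge(priv, tx_req);		/* fills priv->tx_sge[] and num_sge */
	priv->tx_wr.wr_id = wr_id | IPOIB_OP_CM;

	if (unlikely(ib_post_send(tx->qp, &priv->tx_wr, &bad_wr)))
		ipoib_dma_unmap_tx(priv, tx_req);	/* undo the mapping on failure */
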
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index cf32a778e7d0..ee39be6ccfb0 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -694,14 +694,12 @@ repost:
694static inline int post_send(struct ipoib_dev_priv *priv, 694static inline int post_send(struct ipoib_dev_priv *priv,
695 struct ipoib_cm_tx *tx, 695 struct ipoib_cm_tx *tx,
696 unsigned int wr_id, 696 unsigned int wr_id,
697 u64 addr, int len) 697 struct ipoib_tx_buf *tx_req)
698{ 698{
699 struct ib_send_wr *bad_wr; 699 struct ib_send_wr *bad_wr;
700 700
701 priv->tx_sge[0].addr = addr; 701 ipoib_build_sge(priv, tx_req);
702 priv->tx_sge[0].length = len;
703 702
704 priv->tx_wr.num_sge = 1;
705 priv->tx_wr.wr_id = wr_id | IPOIB_OP_CM; 703 priv->tx_wr.wr_id = wr_id | IPOIB_OP_CM;
706 704
707 return ib_post_send(tx->qp, &priv->tx_wr, &bad_wr); 705 return ib_post_send(tx->qp, &priv->tx_wr, &bad_wr);
@@ -710,8 +708,7 @@ static inline int post_send(struct ipoib_dev_priv *priv,
710void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx) 708void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx)
711{ 709{
712 struct ipoib_dev_priv *priv = netdev_priv(dev); 710 struct ipoib_dev_priv *priv = netdev_priv(dev);
713 struct ipoib_cm_tx_buf *tx_req; 711 struct ipoib_tx_buf *tx_req;
714 u64 addr;
715 int rc; 712 int rc;
716 713
717 if (unlikely(skb->len > tx->mtu)) { 714 if (unlikely(skb->len > tx->mtu)) {
@@ -735,24 +732,21 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
735 */ 732 */
736 tx_req = &tx->tx_ring[tx->tx_head & (ipoib_sendq_size - 1)]; 733 tx_req = &tx->tx_ring[tx->tx_head & (ipoib_sendq_size - 1)];
737 tx_req->skb = skb; 734 tx_req->skb = skb;
738 addr = ib_dma_map_single(priv->ca, skb->data, skb->len, DMA_TO_DEVICE); 735
739 if (unlikely(ib_dma_mapping_error(priv->ca, addr))) { 736 if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) {
740 ++dev->stats.tx_errors; 737 ++dev->stats.tx_errors;
741 dev_kfree_skb_any(skb); 738 dev_kfree_skb_any(skb);
742 return; 739 return;
743 } 740 }
744 741
745 tx_req->mapping = addr;
746
747 skb_orphan(skb); 742 skb_orphan(skb);
748 skb_dst_drop(skb); 743 skb_dst_drop(skb);
749 744
750 rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1), 745 rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1), tx_req);
751 addr, skb->len);
752 if (unlikely(rc)) { 746 if (unlikely(rc)) {
753 ipoib_warn(priv, "post_send failed, error %d\n", rc); 747 ipoib_warn(priv, "post_send failed, error %d\n", rc);
754 ++dev->stats.tx_errors; 748 ++dev->stats.tx_errors;
755 ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE); 749 ipoib_dma_unmap_tx(priv, tx_req);
756 dev_kfree_skb_any(skb); 750 dev_kfree_skb_any(skb);
757 } else { 751 } else {
758 dev->trans_start = jiffies; 752 dev->trans_start = jiffies;
@@ -777,7 +771,7 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
777 struct ipoib_dev_priv *priv = netdev_priv(dev); 771 struct ipoib_dev_priv *priv = netdev_priv(dev);
778 struct ipoib_cm_tx *tx = wc->qp->qp_context; 772 struct ipoib_cm_tx *tx = wc->qp->qp_context;
779 unsigned int wr_id = wc->wr_id & ~IPOIB_OP_CM; 773 unsigned int wr_id = wc->wr_id & ~IPOIB_OP_CM;
780 struct ipoib_cm_tx_buf *tx_req; 774 struct ipoib_tx_buf *tx_req;
781 unsigned long flags; 775 unsigned long flags;
782 776
783 ipoib_dbg_data(priv, "cm send completion: id %d, status: %d\n", 777 ipoib_dbg_data(priv, "cm send completion: id %d, status: %d\n",
@@ -791,7 +785,7 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
791 785
792 tx_req = &tx->tx_ring[wr_id]; 786 tx_req = &tx->tx_ring[wr_id];
793 787
794 ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE); 788 ipoib_dma_unmap_tx(priv, tx_req);
795 789
796 /* FIXME: is this right? Shouldn't we only increment on success? */ 790 /* FIXME: is this right? Shouldn't we only increment on success? */
797 ++dev->stats.tx_packets; 791 ++dev->stats.tx_packets;
@@ -1036,6 +1030,9 @@ static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_
1036 1030
1037 struct ib_qp *tx_qp; 1031 struct ib_qp *tx_qp;
1038 1032
1033 if (dev->features & NETIF_F_SG)
1034 attr.cap.max_send_sge = MAX_SKB_FRAGS + 1;
1035
1039 tx_qp = ib_create_qp(priv->pd, &attr); 1036 tx_qp = ib_create_qp(priv->pd, &attr);
1040 if (PTR_ERR(tx_qp) == -EINVAL) { 1037 if (PTR_ERR(tx_qp) == -EINVAL) {
1041 ipoib_warn(priv, "can't use GFP_NOIO for QPs on device %s, using GFP_KERNEL\n", 1038 ipoib_warn(priv, "can't use GFP_NOIO for QPs on device %s, using GFP_KERNEL\n",
@@ -1170,7 +1167,7 @@ err_tx:
1170static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p) 1167static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
1171{ 1168{
1172 struct ipoib_dev_priv *priv = netdev_priv(p->dev); 1169 struct ipoib_dev_priv *priv = netdev_priv(p->dev);
1173 struct ipoib_cm_tx_buf *tx_req; 1170 struct ipoib_tx_buf *tx_req;
1174 unsigned long begin; 1171 unsigned long begin;
1175 1172
1176 ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n", 1173 ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n",
@@ -1197,8 +1194,7 @@ timeout:
1197 1194
1198 while ((int) p->tx_tail - (int) p->tx_head < 0) { 1195 while ((int) p->tx_tail - (int) p->tx_head < 0) {
1199 tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)]; 1196 tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)];
1200 ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, 1197 ipoib_dma_unmap_tx(priv, tx_req);
1201 DMA_TO_DEVICE);
1202 dev_kfree_skb_any(tx_req->skb); 1198 dev_kfree_skb_any(tx_req->skb);
1203 ++p->tx_tail; 1199 ++p->tx_tail;
1204 netif_tx_lock_bh(p->dev); 1200 netif_tx_lock_bh(p->dev);
@@ -1455,7 +1451,6 @@ static void ipoib_cm_stale_task(struct work_struct *work)
1455 spin_unlock_irq(&priv->lock); 1451 spin_unlock_irq(&priv->lock);
1456} 1452}
1457 1453
1458
1459static ssize_t show_mode(struct device *d, struct device_attribute *attr, 1454static ssize_t show_mode(struct device *d, struct device_attribute *attr,
1460 char *buf) 1455 char *buf)
1461{ 1456{
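
Connected mode can only keep NETIF_F_SG (see the ipoib_main.c hunk below that stops masking it out) because the TX QP above is now created with enough send SGEs for a fully fragmented skb. A short sketch of that sizing decision, with the rest of the attr setup assumed:

	struct ib_qp_init_attr attr = {
		.cap.max_send_sge = 1,			/* linear head only */
	};

	/* One SGE for the head plus one per possible page fragment. */
	if (dev->features & NETIF_F_SG)
		attr.cap.max_send_sge = MAX_SKB_FRAGS + 1;
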
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 63b92cbb29ad..d266667ca9b8 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -263,8 +263,7 @@ repost:
263 "for buf %d\n", wr_id); 263 "for buf %d\n", wr_id);
264} 264}
265 265
266static int ipoib_dma_map_tx(struct ib_device *ca, 266int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req)
267 struct ipoib_tx_buf *tx_req)
268{ 267{
269 struct sk_buff *skb = tx_req->skb; 268 struct sk_buff *skb = tx_req->skb;
270 u64 *mapping = tx_req->mapping; 269 u64 *mapping = tx_req->mapping;
@@ -305,8 +304,8 @@ partial_error:
305 return -EIO; 304 return -EIO;
306} 305}
307 306
308static void ipoib_dma_unmap_tx(struct ib_device *ca, 307void ipoib_dma_unmap_tx(struct ipoib_dev_priv *priv,
309 struct ipoib_tx_buf *tx_req) 308 struct ipoib_tx_buf *tx_req)
310{ 309{
311 struct sk_buff *skb = tx_req->skb; 310 struct sk_buff *skb = tx_req->skb;
312 u64 *mapping = tx_req->mapping; 311 u64 *mapping = tx_req->mapping;
@@ -314,7 +313,8 @@ static void ipoib_dma_unmap_tx(struct ib_device *ca,
314 int off; 313 int off;
315 314
316 if (skb_headlen(skb)) { 315 if (skb_headlen(skb)) {
317 ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE); 316 ib_dma_unmap_single(priv->ca, mapping[0], skb_headlen(skb),
317 DMA_TO_DEVICE);
318 off = 1; 318 off = 1;
319 } else 319 } else
320 off = 0; 320 off = 0;
@@ -322,8 +322,8 @@ static void ipoib_dma_unmap_tx(struct ib_device *ca,
322 for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) { 322 for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
323 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 323 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
324 324
325 ib_dma_unmap_page(ca, mapping[i + off], skb_frag_size(frag), 325 ib_dma_unmap_page(priv->ca, mapping[i + off],
326 DMA_TO_DEVICE); 326 skb_frag_size(frag), DMA_TO_DEVICE);
327 } 327 }
328} 328}
329 329
@@ -389,7 +389,7 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
389 389
390 tx_req = &priv->tx_ring[wr_id]; 390 tx_req = &priv->tx_ring[wr_id];
391 391
392 ipoib_dma_unmap_tx(priv->ca, tx_req); 392 ipoib_dma_unmap_tx(priv, tx_req);
393 393
394 ++dev->stats.tx_packets; 394 ++dev->stats.tx_packets;
395 dev->stats.tx_bytes += tx_req->skb->len; 395 dev->stats.tx_bytes += tx_req->skb->len;
@@ -514,24 +514,10 @@ static inline int post_send(struct ipoib_dev_priv *priv,
514 void *head, int hlen) 514 void *head, int hlen)
515{ 515{
516 struct ib_send_wr *bad_wr; 516 struct ib_send_wr *bad_wr;
517 int i, off;
518 struct sk_buff *skb = tx_req->skb; 517 struct sk_buff *skb = tx_req->skb;
519 skb_frag_t *frags = skb_shinfo(skb)->frags;
520 int nr_frags = skb_shinfo(skb)->nr_frags;
521 u64 *mapping = tx_req->mapping;
522 518
523 if (skb_headlen(skb)) { 519 ipoib_build_sge(priv, tx_req);
524 priv->tx_sge[0].addr = mapping[0];
525 priv->tx_sge[0].length = skb_headlen(skb);
526 off = 1;
527 } else
528 off = 0;
529 520
530 for (i = 0; i < nr_frags; ++i) {
531 priv->tx_sge[i + off].addr = mapping[i + off];
532 priv->tx_sge[i + off].length = skb_frag_size(&frags[i]);
533 }
534 priv->tx_wr.num_sge = nr_frags + off;
535 priv->tx_wr.wr_id = wr_id; 521 priv->tx_wr.wr_id = wr_id;
536 priv->tx_wr.wr.ud.remote_qpn = qpn; 522 priv->tx_wr.wr.ud.remote_qpn = qpn;
537 priv->tx_wr.wr.ud.ah = address; 523 priv->tx_wr.wr.ud.ah = address;
@@ -617,7 +603,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
617 ipoib_warn(priv, "post_send failed, error %d\n", rc); 603 ipoib_warn(priv, "post_send failed, error %d\n", rc);
618 ++dev->stats.tx_errors; 604 ++dev->stats.tx_errors;
619 --priv->tx_outstanding; 605 --priv->tx_outstanding;
620 ipoib_dma_unmap_tx(priv->ca, tx_req); 606 ipoib_dma_unmap_tx(priv, tx_req);
621 dev_kfree_skb_any(skb); 607 dev_kfree_skb_any(skb);
622 if (netif_queue_stopped(dev)) 608 if (netif_queue_stopped(dev))
623 netif_wake_queue(dev); 609 netif_wake_queue(dev);
@@ -868,7 +854,7 @@ int ipoib_ib_dev_stop(struct net_device *dev)
868 while ((int) priv->tx_tail - (int) priv->tx_head < 0) { 854 while ((int) priv->tx_tail - (int) priv->tx_head < 0) {
869 tx_req = &priv->tx_ring[priv->tx_tail & 855 tx_req = &priv->tx_ring[priv->tx_tail &
870 (ipoib_sendq_size - 1)]; 856 (ipoib_sendq_size - 1)];
871 ipoib_dma_unmap_tx(priv->ca, tx_req); 857 ipoib_dma_unmap_tx(priv, tx_req);
872 dev_kfree_skb_any(tx_req->skb); 858 dev_kfree_skb_any(tx_req->skb);
873 ++priv->tx_tail; 859 ++priv->tx_tail;
874 --priv->tx_outstanding; 860 --priv->tx_outstanding;
@@ -985,20 +971,21 @@ static inline int update_child_pkey(struct ipoib_dev_priv *priv)
985} 971}
986 972
987static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, 973static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
988 enum ipoib_flush_level level) 974 enum ipoib_flush_level level,
975 int nesting)
989{ 976{
990 struct ipoib_dev_priv *cpriv; 977 struct ipoib_dev_priv *cpriv;
991 struct net_device *dev = priv->dev; 978 struct net_device *dev = priv->dev;
992 int result; 979 int result;
993 980
994 down_read(&priv->vlan_rwsem); 981 down_read_nested(&priv->vlan_rwsem, nesting);
995 982
996 /* 983 /*
997 * Flush any child interfaces too -- they might be up even if 984 * Flush any child interfaces too -- they might be up even if
998 * the parent is down. 985 * the parent is down.
999 */ 986 */
1000 list_for_each_entry(cpriv, &priv->child_intfs, list) 987 list_for_each_entry(cpriv, &priv->child_intfs, list)
1001 __ipoib_ib_dev_flush(cpriv, level); 988 __ipoib_ib_dev_flush(cpriv, level, nesting + 1);
1002 989
1003 up_read(&priv->vlan_rwsem); 990 up_read(&priv->vlan_rwsem);
1004 991
@@ -1076,7 +1063,7 @@ void ipoib_ib_dev_flush_light(struct work_struct *work)
1076 struct ipoib_dev_priv *priv = 1063 struct ipoib_dev_priv *priv =
1077 container_of(work, struct ipoib_dev_priv, flush_light); 1064 container_of(work, struct ipoib_dev_priv, flush_light);
1078 1065
1079 __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT); 1066 __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT, 0);
1080} 1067}
1081 1068
1082void ipoib_ib_dev_flush_normal(struct work_struct *work) 1069void ipoib_ib_dev_flush_normal(struct work_struct *work)
@@ -1084,7 +1071,7 @@ void ipoib_ib_dev_flush_normal(struct work_struct *work)
1084 struct ipoib_dev_priv *priv = 1071 struct ipoib_dev_priv *priv =
1085 container_of(work, struct ipoib_dev_priv, flush_normal); 1072 container_of(work, struct ipoib_dev_priv, flush_normal);
1086 1073
1087 __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL); 1074 __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL, 0);
1088} 1075}
1089 1076
1090void ipoib_ib_dev_flush_heavy(struct work_struct *work) 1077void ipoib_ib_dev_flush_heavy(struct work_struct *work)
@@ -1092,7 +1079,7 @@ void ipoib_ib_dev_flush_heavy(struct work_struct *work)
1092 struct ipoib_dev_priv *priv = 1079 struct ipoib_dev_priv *priv =
1093 container_of(work, struct ipoib_dev_priv, flush_heavy); 1080 container_of(work, struct ipoib_dev_priv, flush_heavy);
1094 1081
1095 __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY); 1082 __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY, 0);
1096} 1083}
1097 1084
1098void ipoib_ib_dev_cleanup(struct net_device *dev) 1085void ipoib_ib_dev_cleanup(struct net_device *dev)
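
The flush path above now threads a nesting depth through the recursion and takes the semaphore with down_read_nested(), so lockdep sees each level of the parent/child vlan_rwsem chain as a distinct subclass rather than a recursive self-deadlock. Reduced to its essentials (hypothetical function name, same idea):

	static void flush_tree(struct ipoib_dev_priv *priv, int nesting)
	{
		struct ipoib_dev_priv *cpriv;

		/* Distinct lockdep subclass per recursion level. */
		down_read_nested(&priv->vlan_rwsem, nesting);
		list_for_each_entry(cpriv, &priv->child_intfs, list)
			flush_tree(cpriv, nesting + 1);
		up_read(&priv->vlan_rwsem);
	}
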
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index da149c278cb8..b2943c84a5dd 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -190,7 +190,7 @@ static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_featu
190 struct ipoib_dev_priv *priv = netdev_priv(dev); 190 struct ipoib_dev_priv *priv = netdev_priv(dev);
191 191
192 if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags)) 192 if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags))
193 features &= ~(NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO); 193 features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
194 194
195 return features; 195 return features;
196} 196}
@@ -232,6 +232,7 @@ int ipoib_set_mode(struct net_device *dev, const char *buf)
232 ipoib_warn(priv, "enabling connected mode " 232 ipoib_warn(priv, "enabling connected mode "
233 "will cause multicast packet drops\n"); 233 "will cause multicast packet drops\n");
234 netdev_update_features(dev); 234 netdev_update_features(dev);
235 dev_set_mtu(dev, ipoib_cm_max_mtu(dev));
235 rtnl_unlock(); 236 rtnl_unlock();
236 priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM; 237 priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;
237 238
@@ -1577,7 +1578,8 @@ static struct net_device *ipoib_add_port(const char *format,
1577 SET_NETDEV_DEV(priv->dev, hca->dma_device); 1578 SET_NETDEV_DEV(priv->dev, hca->dma_device);
1578 priv->dev->dev_id = port - 1; 1579 priv->dev->dev_id = port - 1;
1579 1580
1580 if (!ib_query_port(hca, port, &attr)) 1581 result = ib_query_port(hca, port, &attr);
1582 if (!result)
1581 priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu); 1583 priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
1582 else { 1584 else {
1583 printk(KERN_WARNING "%s: ib_query_port %d failed\n", 1585 printk(KERN_WARNING "%s: ib_query_port %d failed\n",
@@ -1598,7 +1600,8 @@ static struct net_device *ipoib_add_port(const char *format,
1598 goto device_init_failed; 1600 goto device_init_failed;
1599 } 1601 }
1600 1602
1601 if (ipoib_set_dev_features(priv, hca)) 1603 result = ipoib_set_dev_features(priv, hca);
1604 if (result)
1602 goto device_init_failed; 1605 goto device_init_failed;
1603 1606
1604 /* 1607 /*
@@ -1684,7 +1687,7 @@ static void ipoib_add_one(struct ib_device *device)
1684 struct list_head *dev_list; 1687 struct list_head *dev_list;
1685 struct net_device *dev; 1688 struct net_device *dev;
1686 struct ipoib_dev_priv *priv; 1689 struct ipoib_dev_priv *priv;
1687 int s, e, p; 1690 int p;
1688 int count = 0; 1691 int count = 0;
1689 1692
1690 dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL); 1693 dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
@@ -1693,15 +1696,7 @@ static void ipoib_add_one(struct ib_device *device)
1693 1696
1694 INIT_LIST_HEAD(dev_list); 1697 INIT_LIST_HEAD(dev_list);
1695 1698
1696 if (device->node_type == RDMA_NODE_IB_SWITCH) { 1699 for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
1697 s = 0;
1698 e = 0;
1699 } else {
1700 s = 1;
1701 e = device->phys_port_cnt;
1702 }
1703
1704 for (p = s; p <= e; ++p) {
1705 if (!rdma_protocol_ib(device, p)) 1700 if (!rdma_protocol_ib(device, p))
1706 continue; 1701 continue;
1707 dev = ipoib_add_port("ib%d", device, p); 1702 dev = ipoib_add_port("ib%d", device, p);
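
ipoib_add_one() (and srp_add_one() further down) drop the RDMA_NODE_IB_SWITCH special case in favour of the rdma_start_port()/rdma_end_port() helpers, which cover ports 0..0 for switches and 1..phys_port_cnt for HCAs, exactly what the removed s/e bookkeeping computed by hand. The resulting port walk is the standard idiom:

	int p;

	for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
		if (!rdma_protocol_ib(device, p))
			continue;		/* skip non-IB ports */
		/* per-port setup goes here */
	}
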
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index 9e6ee82a8fd7..851c8219d501 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -177,7 +177,8 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
177 else 177 else
178 size += ipoib_recvq_size * ipoib_max_conn_qp; 178 size += ipoib_recvq_size * ipoib_max_conn_qp;
179 } else 179 } else
180 goto out_free_wq; 180 if (ret != -ENOSYS)
181 goto out_free_wq;
181 182
182 cq_attr.cqe = size; 183 cq_attr.cqe = size;
183 priv->recv_cq = ib_create_cq(priv->ca, ipoib_ib_completion, NULL, 184 priv->recv_cq = ib_create_cq(priv->ca, ipoib_ib_completion, NULL,
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 771700963127..d851e1828d6f 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -775,6 +775,17 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
775 ret = isert_rdma_post_recvl(isert_conn); 775 ret = isert_rdma_post_recvl(isert_conn);
776 if (ret) 776 if (ret)
777 goto out_conn_dev; 777 goto out_conn_dev;
778 /*
779 * Obtain the second reference now before isert_rdma_accept() to
780 * ensure that any initiator generated REJECT CM event that occurs
781 * asynchronously won't drop the last reference until the error path
 782 * in iscsi_target_login_sess_out() does its ->iscsit_free_conn() ->
783 * isert_free_conn() -> isert_put_conn() -> kref_put().
784 */
785 if (!kref_get_unless_zero(&isert_conn->kref)) {
786 isert_warn("conn %p connect_release is running\n", isert_conn);
787 goto out_conn_dev;
788 }
778 789
779 ret = isert_rdma_accept(isert_conn); 790 ret = isert_rdma_accept(isert_conn);
780 if (ret) 791 if (ret)
@@ -836,11 +847,6 @@ isert_connected_handler(struct rdma_cm_id *cma_id)
836 847
837 isert_info("conn %p\n", isert_conn); 848 isert_info("conn %p\n", isert_conn);
838 849
839 if (!kref_get_unless_zero(&isert_conn->kref)) {
840 isert_warn("conn %p connect_release is running\n", isert_conn);
841 return;
842 }
843
844 mutex_lock(&isert_conn->mutex); 850 mutex_lock(&isert_conn->mutex);
845 if (isert_conn->state != ISER_CONN_FULL_FEATURE) 851 if (isert_conn->state != ISER_CONN_FULL_FEATURE)
846 isert_conn->state = ISER_CONN_UP; 852 isert_conn->state = ISER_CONN_UP;
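
The isert hunk moves the extra kref acquisition ahead of isert_rdma_accept(): the reference has to exist before the accept can trigger an asynchronous initiator REJECT, otherwise that error path may drop the last reference while the connect handler is still using the connection. kref_get_unless_zero() is the usual guard for "take a reference only if the object is still live"; in generic form (hypothetical object and release function):

	/* Returns 0 once the refcount has already hit zero, i.e. teardown
	 * is in progress and the object must not be reused.
	 */
	if (!kref_get_unless_zero(&obj->kref))
		return -EAGAIN;

	/* ... use obj ... */
	kref_put(&obj->kref, obj_release);	/* balanced later */
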
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 267dc4f75502..31a20b462266 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -161,13 +161,10 @@ static int srp_tmo_set(const char *val, const struct kernel_param *kp)
161{ 161{
162 int tmo, res; 162 int tmo, res;
163 163
164 if (strncmp(val, "off", 3) != 0) { 164 res = srp_parse_tmo(&tmo, val);
165 res = kstrtoint(val, 0, &tmo); 165 if (res)
166 if (res) 166 goto out;
167 goto out; 167
168 } else {
169 tmo = -1;
170 }
171 if (kp->arg == &srp_reconnect_delay) 168 if (kp->arg == &srp_reconnect_delay)
172 res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo, 169 res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
173 srp_dev_loss_tmo); 170 srp_dev_loss_tmo);
@@ -3379,7 +3376,7 @@ static void srp_add_one(struct ib_device *device)
3379 struct srp_device *srp_dev; 3376 struct srp_device *srp_dev;
3380 struct ib_device_attr *dev_attr; 3377 struct ib_device_attr *dev_attr;
3381 struct srp_host *host; 3378 struct srp_host *host;
3382 int mr_page_shift, s, e, p; 3379 int mr_page_shift, p;
3383 u64 max_pages_per_mr; 3380 u64 max_pages_per_mr;
3384 3381
3385 dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL); 3382 dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
@@ -3443,15 +3440,7 @@ static void srp_add_one(struct ib_device *device)
3443 if (IS_ERR(srp_dev->mr)) 3440 if (IS_ERR(srp_dev->mr))
3444 goto err_pd; 3441 goto err_pd;
3445 3442
3446 if (device->node_type == RDMA_NODE_IB_SWITCH) { 3443 for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
3447 s = 0;
3448 e = 0;
3449 } else {
3450 s = 1;
3451 e = device->phys_port_cnt;
3452 }
3453
3454 for (p = s; p <= e; ++p) {
3455 host = srp_add_port(srp_dev, p); 3444 host = srp_add_port(srp_dev, p);
3456 if (host) 3445 if (host)
3457 list_add_tail(&host->list, &srp_dev->dev_list); 3446 list_add_tail(&host->list, &srp_dev->dev_list);
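
srp_tmo_set() now delegates the string handling to srp_parse_tmo(), whose body is not part of this hunk; judging from the code it replaces, it plausibly maps "off" to -1 and passes everything else through kstrtoint(), roughly:

	static int srp_parse_tmo(int *tmo, const char *val)
	{
		if (!strncmp(val, "off", 3)) {
			*tmo = -1;		/* "off" disables the timeout */
			return 0;
		}
		return kstrtoint(val, 0, tmo);
	}
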
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 82897ca17f32..60ff0a2390e5 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -302,7 +302,7 @@ static void srpt_get_iou(struct ib_dm_mad *mad)
302 int i; 302 int i;
303 303
304 ioui = (struct ib_dm_iou_info *)mad->data; 304 ioui = (struct ib_dm_iou_info *)mad->data;
305 ioui->change_id = __constant_cpu_to_be16(1); 305 ioui->change_id = cpu_to_be16(1);
306 ioui->max_controllers = 16; 306 ioui->max_controllers = 16;
307 307
308 /* set present for slot 1 and empty for the rest */ 308 /* set present for slot 1 and empty for the rest */
@@ -330,13 +330,13 @@ static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
330 330
331 if (!slot || slot > 16) { 331 if (!slot || slot > 16) {
332 mad->mad_hdr.status 332 mad->mad_hdr.status
333 = __constant_cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD); 333 = cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
334 return; 334 return;
335 } 335 }
336 336
337 if (slot > 2) { 337 if (slot > 2) {
338 mad->mad_hdr.status 338 mad->mad_hdr.status
339 = __constant_cpu_to_be16(DM_MAD_STATUS_NO_IOC); 339 = cpu_to_be16(DM_MAD_STATUS_NO_IOC);
340 return; 340 return;
341 } 341 }
342 342
@@ -348,10 +348,10 @@ static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
348 iocp->device_version = cpu_to_be16(sdev->dev_attr.hw_ver); 348 iocp->device_version = cpu_to_be16(sdev->dev_attr.hw_ver);
349 iocp->subsys_vendor_id = cpu_to_be32(sdev->dev_attr.vendor_id); 349 iocp->subsys_vendor_id = cpu_to_be32(sdev->dev_attr.vendor_id);
350 iocp->subsys_device_id = 0x0; 350 iocp->subsys_device_id = 0x0;
351 iocp->io_class = __constant_cpu_to_be16(SRP_REV16A_IB_IO_CLASS); 351 iocp->io_class = cpu_to_be16(SRP_REV16A_IB_IO_CLASS);
352 iocp->io_subclass = __constant_cpu_to_be16(SRP_IO_SUBCLASS); 352 iocp->io_subclass = cpu_to_be16(SRP_IO_SUBCLASS);
353 iocp->protocol = __constant_cpu_to_be16(SRP_PROTOCOL); 353 iocp->protocol = cpu_to_be16(SRP_PROTOCOL);
354 iocp->protocol_version = __constant_cpu_to_be16(SRP_PROTOCOL_VERSION); 354 iocp->protocol_version = cpu_to_be16(SRP_PROTOCOL_VERSION);
355 iocp->send_queue_depth = cpu_to_be16(sdev->srq_size); 355 iocp->send_queue_depth = cpu_to_be16(sdev->srq_size);
356 iocp->rdma_read_depth = 4; 356 iocp->rdma_read_depth = 4;
357 iocp->send_size = cpu_to_be32(srp_max_req_size); 357 iocp->send_size = cpu_to_be32(srp_max_req_size);
@@ -379,13 +379,13 @@ static void srpt_get_svc_entries(u64 ioc_guid,
379 379
380 if (!slot || slot > 16) { 380 if (!slot || slot > 16) {
381 mad->mad_hdr.status 381 mad->mad_hdr.status
382 = __constant_cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD); 382 = cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
383 return; 383 return;
384 } 384 }
385 385
386 if (slot > 2 || lo > hi || hi > 1) { 386 if (slot > 2 || lo > hi || hi > 1) {
387 mad->mad_hdr.status 387 mad->mad_hdr.status
388 = __constant_cpu_to_be16(DM_MAD_STATUS_NO_IOC); 388 = cpu_to_be16(DM_MAD_STATUS_NO_IOC);
389 return; 389 return;
390 } 390 }
391 391
@@ -436,7 +436,7 @@ static void srpt_mgmt_method_get(struct srpt_port *sp, struct ib_mad *rq_mad,
436 break; 436 break;
437 default: 437 default:
438 rsp_mad->mad_hdr.status = 438 rsp_mad->mad_hdr.status =
439 __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR); 439 cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
440 break; 440 break;
441 } 441 }
442} 442}
@@ -493,11 +493,11 @@ static void srpt_mad_recv_handler(struct ib_mad_agent *mad_agent,
493 break; 493 break;
494 case IB_MGMT_METHOD_SET: 494 case IB_MGMT_METHOD_SET:
495 dm_mad->mad_hdr.status = 495 dm_mad->mad_hdr.status =
496 __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR); 496 cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
497 break; 497 break;
498 default: 498 default:
499 dm_mad->mad_hdr.status = 499 dm_mad->mad_hdr.status =
500 __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD); 500 cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD);
501 break; 501 break;
502 } 502 }
503 503
@@ -1535,7 +1535,7 @@ static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
1535 memset(srp_rsp, 0, sizeof *srp_rsp); 1535 memset(srp_rsp, 0, sizeof *srp_rsp);
1536 srp_rsp->opcode = SRP_RSP; 1536 srp_rsp->opcode = SRP_RSP;
1537 srp_rsp->req_lim_delta = 1537 srp_rsp->req_lim_delta =
1538 __constant_cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0)); 1538 cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
1539 srp_rsp->tag = tag; 1539 srp_rsp->tag = tag;
1540 srp_rsp->status = status; 1540 srp_rsp->status = status;
1541 1541
@@ -1585,8 +1585,8 @@ static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch,
1585 memset(srp_rsp, 0, sizeof *srp_rsp); 1585 memset(srp_rsp, 0, sizeof *srp_rsp);
1586 1586
1587 srp_rsp->opcode = SRP_RSP; 1587 srp_rsp->opcode = SRP_RSP;
1588 srp_rsp->req_lim_delta = __constant_cpu_to_be32(1 1588 srp_rsp->req_lim_delta =
1589 + atomic_xchg(&ch->req_lim_delta, 0)); 1589 cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
1590 srp_rsp->tag = tag; 1590 srp_rsp->tag = tag;
1591 1591
1592 srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID; 1592 srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID;
@@ -1630,7 +1630,7 @@ static uint64_t srpt_unpack_lun(const uint8_t *lun, int len)
1630 switch (len) { 1630 switch (len) {
1631 case 8: 1631 case 8:
1632 if ((*((__be64 *)lun) & 1632 if ((*((__be64 *)lun) &
1633 __constant_cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0) 1633 cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0)
1634 goto out_err; 1634 goto out_err;
1635 break; 1635 break;
1636 case 4: 1636 case 4:
@@ -2449,8 +2449,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
2449 } 2449 }
2450 2450
2451 if (it_iu_len > srp_max_req_size || it_iu_len < 64) { 2451 if (it_iu_len > srp_max_req_size || it_iu_len < 64) {
2452 rej->reason = __constant_cpu_to_be32( 2452 rej->reason = cpu_to_be32(
2453 SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE); 2453 SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE);
2454 ret = -EINVAL; 2454 ret = -EINVAL;
2455 pr_err("rejected SRP_LOGIN_REQ because its" 2455 pr_err("rejected SRP_LOGIN_REQ because its"
2456 " length (%d bytes) is out of range (%d .. %d)\n", 2456 " length (%d bytes) is out of range (%d .. %d)\n",
@@ -2459,8 +2459,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
2459 } 2459 }
2460 2460
2461 if (!sport->enabled) { 2461 if (!sport->enabled) {
2462 rej->reason = __constant_cpu_to_be32( 2462 rej->reason = cpu_to_be32(
2463 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); 2463 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2464 ret = -EINVAL; 2464 ret = -EINVAL;
2465 pr_err("rejected SRP_LOGIN_REQ because the target port" 2465 pr_err("rejected SRP_LOGIN_REQ because the target port"
2466 " has not yet been enabled\n"); 2466 " has not yet been enabled\n");
@@ -2505,8 +2505,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
2505 if (*(__be64 *)req->target_port_id != cpu_to_be64(srpt_service_guid) 2505 if (*(__be64 *)req->target_port_id != cpu_to_be64(srpt_service_guid)
2506 || *(__be64 *)(req->target_port_id + 8) != 2506 || *(__be64 *)(req->target_port_id + 8) !=
2507 cpu_to_be64(srpt_service_guid)) { 2507 cpu_to_be64(srpt_service_guid)) {
2508 rej->reason = __constant_cpu_to_be32( 2508 rej->reason = cpu_to_be32(
2509 SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL); 2509 SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL);
2510 ret = -ENOMEM; 2510 ret = -ENOMEM;
2511 pr_err("rejected SRP_LOGIN_REQ because it" 2511 pr_err("rejected SRP_LOGIN_REQ because it"
2512 " has an invalid target port identifier.\n"); 2512 " has an invalid target port identifier.\n");
@@ -2515,8 +2515,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
2515 2515
2516 ch = kzalloc(sizeof *ch, GFP_KERNEL); 2516 ch = kzalloc(sizeof *ch, GFP_KERNEL);
2517 if (!ch) { 2517 if (!ch) {
2518 rej->reason = __constant_cpu_to_be32( 2518 rej->reason = cpu_to_be32(
2519 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); 2519 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2520 pr_err("rejected SRP_LOGIN_REQ because no memory.\n"); 2520 pr_err("rejected SRP_LOGIN_REQ because no memory.\n");
2521 ret = -ENOMEM; 2521 ret = -ENOMEM;
2522 goto reject; 2522 goto reject;
@@ -2552,8 +2552,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
2552 2552
2553 ret = srpt_create_ch_ib(ch); 2553 ret = srpt_create_ch_ib(ch);
2554 if (ret) { 2554 if (ret) {
2555 rej->reason = __constant_cpu_to_be32( 2555 rej->reason = cpu_to_be32(
2556 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); 2556 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2557 pr_err("rejected SRP_LOGIN_REQ because creating" 2557 pr_err("rejected SRP_LOGIN_REQ because creating"
2558 " a new RDMA channel failed.\n"); 2558 " a new RDMA channel failed.\n");
2559 goto free_ring; 2559 goto free_ring;
@@ -2561,8 +2561,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
2561 2561
2562 ret = srpt_ch_qp_rtr(ch, ch->qp); 2562 ret = srpt_ch_qp_rtr(ch, ch->qp);
2563 if (ret) { 2563 if (ret) {
2564 rej->reason = __constant_cpu_to_be32( 2564 rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2565 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2566 pr_err("rejected SRP_LOGIN_REQ because enabling" 2565 pr_err("rejected SRP_LOGIN_REQ because enabling"
2567 " RTR failed (error code = %d)\n", ret); 2566 " RTR failed (error code = %d)\n", ret);
2568 goto destroy_ib; 2567 goto destroy_ib;
@@ -2580,15 +2579,15 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
2580 if (!nacl) { 2579 if (!nacl) {
2581 pr_info("Rejected login because no ACL has been" 2580 pr_info("Rejected login because no ACL has been"
2582 " configured yet for initiator %s.\n", ch->sess_name); 2581 " configured yet for initiator %s.\n", ch->sess_name);
2583 rej->reason = __constant_cpu_to_be32( 2582 rej->reason = cpu_to_be32(
2584 SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED); 2583 SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);
2585 goto destroy_ib; 2584 goto destroy_ib;
2586 } 2585 }
2587 2586
2588 ch->sess = transport_init_session(TARGET_PROT_NORMAL); 2587 ch->sess = transport_init_session(TARGET_PROT_NORMAL);
2589 if (IS_ERR(ch->sess)) { 2588 if (IS_ERR(ch->sess)) {
2590 rej->reason = __constant_cpu_to_be32( 2589 rej->reason = cpu_to_be32(
2591 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); 2590 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2592 pr_debug("Failed to create session\n"); 2591 pr_debug("Failed to create session\n");
2593 goto deregister_session; 2592 goto deregister_session;
2594 } 2593 }
@@ -2604,8 +2603,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
2604 rsp->max_it_iu_len = req->req_it_iu_len; 2603 rsp->max_it_iu_len = req->req_it_iu_len;
2605 rsp->max_ti_iu_len = req->req_it_iu_len; 2604 rsp->max_ti_iu_len = req->req_it_iu_len;
2606 ch->max_ti_iu_len = it_iu_len; 2605 ch->max_ti_iu_len = it_iu_len;
2607 rsp->buf_fmt = __constant_cpu_to_be16(SRP_BUF_FORMAT_DIRECT 2606 rsp->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT
2608 | SRP_BUF_FORMAT_INDIRECT); 2607 | SRP_BUF_FORMAT_INDIRECT);
2609 rsp->req_lim_delta = cpu_to_be32(ch->rq_size); 2608 rsp->req_lim_delta = cpu_to_be32(ch->rq_size);
2610 atomic_set(&ch->req_lim, ch->rq_size); 2609 atomic_set(&ch->req_lim, ch->rq_size);
2611 atomic_set(&ch->req_lim_delta, 0); 2610 atomic_set(&ch->req_lim_delta, 0);
@@ -2655,8 +2654,8 @@ free_ch:
2655reject: 2654reject:
2656 rej->opcode = SRP_LOGIN_REJ; 2655 rej->opcode = SRP_LOGIN_REJ;
2657 rej->tag = req->tag; 2656 rej->tag = req->tag;
2658 rej->buf_fmt = __constant_cpu_to_be16(SRP_BUF_FORMAT_DIRECT 2657 rej->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT
2659 | SRP_BUF_FORMAT_INDIRECT); 2658 | SRP_BUF_FORMAT_INDIRECT);
2660 2659
2661 ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0, 2660 ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
2662 (void *)rej, sizeof *rej); 2661 (void *)rej, sizeof *rej);
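
The srpt changes are a mechanical conversion from the __constant_cpu_to_be16/32/64() wrappers to the plain cpu_to_be*() helpers: the plain forms already fold to a compile-time constant when their argument is constant (via __builtin_constant_p), so the __constant_ variants add nothing but noise. Both of the following produce identical object code:

	__be16 a = cpu_to_be16(SRP_PROTOCOL);			/* preferred */
	__be16 b = __constant_cpu_to_be16(SRP_PROTOCOL);	/* equivalent, discouraged */
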
diff --git a/drivers/input/input-leds.c b/drivers/input/input-leds.c
index 074a65ed17bb..766bf2660116 100644
--- a/drivers/input/input-leds.c
+++ b/drivers/input/input-leds.c
@@ -71,6 +71,18 @@ static void input_leds_event(struct input_handle *handle, unsigned int type,
71{ 71{
72} 72}
73 73
74static int input_leds_get_count(struct input_dev *dev)
75{
76 unsigned int led_code;
77 int count = 0;
78
79 for_each_set_bit(led_code, dev->ledbit, LED_CNT)
80 if (input_led_info[led_code].name)
81 count++;
82
83 return count;
84}
85
74static int input_leds_connect(struct input_handler *handler, 86static int input_leds_connect(struct input_handler *handler,
75 struct input_dev *dev, 87 struct input_dev *dev,
76 const struct input_device_id *id) 88 const struct input_device_id *id)
@@ -81,7 +93,7 @@ static int input_leds_connect(struct input_handler *handler,
81 int led_no; 93 int led_no;
82 int error; 94 int error;
83 95
84 num_leds = bitmap_weight(dev->ledbit, LED_CNT); 96 num_leds = input_leds_get_count(dev);
85 if (!num_leds) 97 if (!num_leds)
86 return -ENXIO; 98 return -ENXIO;
87 99
@@ -112,7 +124,7 @@ static int input_leds_connect(struct input_handler *handler,
112 led->handle = &leds->handle; 124 led->handle = &leds->handle;
113 led->code = led_code; 125 led->code = led_code;
114 126
115 if (WARN_ON(!input_led_info[led_code].name)) 127 if (!input_led_info[led_code].name)
116 continue; 128 continue;
117 129
118 led->cdev.name = kasprintf(GFP_KERNEL, "%s::%s", 130 led->cdev.name = kasprintf(GFP_KERNEL, "%s::%s",
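
input-leds now counts only the LEDs it can actually register, checking input_led_info[].name for each set bit instead of taking bitmap_weight() over the whole ledbit map; a device advertising a bit with no named table entry would otherwise inflate the allocation and trigger the (now removed) WARN_ON(). The counting loop is simply:

	unsigned int led_code;
	int count = 0;

	for_each_set_bit(led_code, dev->ledbit, LED_CNT)
		if (input_led_info[led_code].name)	/* skip unnamed table holes */
			count++;
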
diff --git a/drivers/input/joystick/turbografx.c b/drivers/input/joystick/turbografx.c
index 27b6a3ce18ca..891797ad76bc 100644
--- a/drivers/input/joystick/turbografx.c
+++ b/drivers/input/joystick/turbografx.c
@@ -196,7 +196,7 @@ static struct tgfx __init *tgfx_probe(int parport, int *n_buttons, int n_devs)
196 if (n_buttons[i] < 1) 196 if (n_buttons[i] < 1)
197 continue; 197 continue;
198 198
199 if (n_buttons[i] > 6) { 199 if (n_buttons[i] > ARRAY_SIZE(tgfx_buttons)) {
200 printk(KERN_ERR "turbografx.c: Invalid number of buttons %d\n", n_buttons[i]); 200 printk(KERN_ERR "turbografx.c: Invalid number of buttons %d\n", n_buttons[i]);
201 err = -EINVAL; 201 err = -EINVAL;
202 goto err_unreg_devs; 202 goto err_unreg_devs;
diff --git a/drivers/input/keyboard/gpio_keys_polled.c b/drivers/input/keyboard/gpio_keys_polled.c
index 097d7216d98e..c6dc644aa580 100644
--- a/drivers/input/keyboard/gpio_keys_polled.c
+++ b/drivers/input/keyboard/gpio_keys_polled.c
@@ -246,7 +246,7 @@ static int gpio_keys_polled_probe(struct platform_device *pdev)
246 * convert it to descriptor. 246 * convert it to descriptor.
247 */ 247 */
248 if (!button->gpiod && gpio_is_valid(button->gpio)) { 248 if (!button->gpiod && gpio_is_valid(button->gpio)) {
249 unsigned flags = 0; 249 unsigned flags = GPIOF_IN;
250 250
251 if (button->active_low) 251 if (button->active_low)
252 flags |= GPIOF_ACTIVE_LOW; 252 flags |= GPIOF_ACTIVE_LOW;
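
The gpio_keys_polled change switches the legacy-GPIO request flags from 0 to GPIOF_IN. This matters because GPIOF_DIR_OUT is the zero value in the GPIOF_* encoding, so requesting with flags == 0 configured the button line as an output; GPIOF_IN asks for an input explicitly. A sketch of the request (the exact devm call and description string in this driver are assumptions):

	unsigned flags = GPIOF_IN;		/* 0 would mean GPIOF_DIR_OUT */

	if (button->active_low)
		flags |= GPIOF_ACTIVE_LOW;

	error = devm_gpio_request_one(&pdev->dev, button->gpio, flags,
				      button->desc);
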
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index d4f0a817e858..c41dec819cdf 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -167,28 +167,16 @@ config INPUT_M68K_BEEP
167 depends on M68K 167 depends on M68K
168 168
169config INPUT_MAX77693_HAPTIC 169config INPUT_MAX77693_HAPTIC
170 tristate "MAXIM MAX77693 haptic controller support" 170 tristate "MAXIM MAX77693/MAX77843 haptic controller support"
171 depends on MFD_MAX77693 && PWM 171 depends on (MFD_MAX77693 || MFD_MAX77843) && PWM
172 select INPUT_FF_MEMLESS 172 select INPUT_FF_MEMLESS
173 help 173 help
174 This option enables support for the haptic controller on 174 This option enables support for the haptic controller on
175 MAXIM MAX77693 chip. 175 MAXIM MAX77693 and MAX77843 chips.
176 176
177 To compile this driver as module, choose M here: the 177 To compile this driver as module, choose M here: the
178 module will be called max77693-haptic. 178 module will be called max77693-haptic.
179 179
180config INPUT_MAX77843_HAPTIC
181 tristate "MAXIM MAX77843 haptic controller support"
182 depends on MFD_MAX77843 && REGULATOR
183 select INPUT_FF_MEMLESS
184 help
185 This option enables support for the haptic controller on
186 MAXIM MAX77843 chip. The driver supports ff-memless interface
187 from input framework.
188
189 To compile this driver as module, choose M here: the
190 module will be called max77843-haptic.
191
192config INPUT_MAX8925_ONKEY 180config INPUT_MAX8925_ONKEY
193 tristate "MAX8925 ONKEY support" 181 tristate "MAX8925 ONKEY support"
194 depends on MFD_MAX8925 182 depends on MFD_MAX8925
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index 53df07dcc23c..0357a088c6a9 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -41,7 +41,6 @@ obj-$(CONFIG_INPUT_KEYSPAN_REMOTE) += keyspan_remote.o
41obj-$(CONFIG_INPUT_KXTJ9) += kxtj9.o 41obj-$(CONFIG_INPUT_KXTJ9) += kxtj9.o
42obj-$(CONFIG_INPUT_M68K_BEEP) += m68kspkr.o 42obj-$(CONFIG_INPUT_M68K_BEEP) += m68kspkr.o
43obj-$(CONFIG_INPUT_MAX77693_HAPTIC) += max77693-haptic.o 43obj-$(CONFIG_INPUT_MAX77693_HAPTIC) += max77693-haptic.o
44obj-$(CONFIG_INPUT_MAX77843_HAPTIC) += max77843-haptic.o
45obj-$(CONFIG_INPUT_MAX8925_ONKEY) += max8925_onkey.o 44obj-$(CONFIG_INPUT_MAX8925_ONKEY) += max8925_onkey.o
46obj-$(CONFIG_INPUT_MAX8997_HAPTIC) += max8997_haptic.o 45obj-$(CONFIG_INPUT_MAX8997_HAPTIC) += max8997_haptic.o
47obj-$(CONFIG_INPUT_MC13783_PWRBUTTON) += mc13783-pwrbutton.o 46obj-$(CONFIG_INPUT_MC13783_PWRBUTTON) += mc13783-pwrbutton.o
diff --git a/drivers/input/misc/axp20x-pek.c b/drivers/input/misc/axp20x-pek.c
index 10e140af5aac..1ac898db303a 100644
--- a/drivers/input/misc/axp20x-pek.c
+++ b/drivers/input/misc/axp20x-pek.c
@@ -292,3 +292,4 @@ module_platform_driver(axp20x_pek_driver);
292MODULE_DESCRIPTION("axp20x Power Button"); 292MODULE_DESCRIPTION("axp20x Power Button");
293MODULE_AUTHOR("Carlo Caione <carlo@caione.org>"); 293MODULE_AUTHOR("Carlo Caione <carlo@caione.org>");
294MODULE_LICENSE("GPL"); 294MODULE_LICENSE("GPL");
295MODULE_ALIAS("platform:axp20x-pek");
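
The one-line axp20x-pek addition matters for autoloading: when the MFD core creates the "axp20x-pek" platform device, the platform bus emits a MODALIAS=platform:axp20x-pek uevent, and modprobe can only resolve it if the module declares the matching alias:

	MODULE_ALIAS("platform:axp20x-pek");	/* enables udev/modprobe autoload */
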
diff --git a/drivers/input/misc/max77693-haptic.c b/drivers/input/misc/max77693-haptic.c
index 39e930c10ebb..6d96bff32a0e 100644
--- a/drivers/input/misc/max77693-haptic.c
+++ b/drivers/input/misc/max77693-haptic.c
@@ -1,8 +1,9 @@
1/* 1/*
2 * MAXIM MAX77693 Haptic device driver 2 * MAXIM MAX77693/MAX77843 Haptic device driver
3 * 3 *
4 * Copyright (C) 2014 Samsung Electronics 4 * Copyright (C) 2014,2015 Samsung Electronics
5 * Jaewon Kim <jaewon02.kim@samsung.com> 5 * Jaewon Kim <jaewon02.kim@samsung.com>
6 * Krzysztof Kozlowski <k.kozlowski@samsung.com>
6 * 7 *
7 * This program is not provided / owned by Maxim Integrated Products. 8 * This program is not provided / owned by Maxim Integrated Products.
8 * 9 *
@@ -24,7 +25,9 @@
24#include <linux/workqueue.h> 25#include <linux/workqueue.h>
25#include <linux/regulator/consumer.h> 26#include <linux/regulator/consumer.h>
26#include <linux/mfd/max77693.h> 27#include <linux/mfd/max77693.h>
28#include <linux/mfd/max77693-common.h>
27#include <linux/mfd/max77693-private.h> 29#include <linux/mfd/max77693-private.h>
30#include <linux/mfd/max77843-private.h>
28 31
29#define MAX_MAGNITUDE_SHIFT 16 32#define MAX_MAGNITUDE_SHIFT 16
30 33
@@ -46,6 +49,8 @@ enum max77693_haptic_pwm_divisor {
46}; 49};
47 50
48struct max77693_haptic { 51struct max77693_haptic {
52 enum max77693_types dev_type;
53
49 struct regmap *regmap_pmic; 54 struct regmap *regmap_pmic;
50 struct regmap *regmap_haptic; 55 struct regmap *regmap_haptic;
51 struct device *dev; 56 struct device *dev;
@@ -59,7 +64,6 @@ struct max77693_haptic {
59 unsigned int pwm_duty; 64 unsigned int pwm_duty;
60 enum max77693_haptic_motor_type type; 65 enum max77693_haptic_motor_type type;
61 enum max77693_haptic_pulse_mode mode; 66 enum max77693_haptic_pulse_mode mode;
62 enum max77693_haptic_pwm_divisor pwm_divisor;
63 67
64 struct work_struct work; 68 struct work_struct work;
65}; 69};
@@ -78,19 +82,52 @@ static int max77693_haptic_set_duty_cycle(struct max77693_haptic *haptic)
78 return 0; 82 return 0;
79} 83}
80 84
85static int max77843_haptic_bias(struct max77693_haptic *haptic, bool on)
86{
87 int error;
88
89 if (haptic->dev_type != TYPE_MAX77843)
90 return 0;
91
92 error = regmap_update_bits(haptic->regmap_haptic,
93 MAX77843_SYS_REG_MAINCTRL1,
94 MAX77843_MAINCTRL1_BIASEN_MASK,
95 on << MAINCTRL1_BIASEN_SHIFT);
96 if (error) {
97 dev_err(haptic->dev, "failed to %s bias: %d\n",
98 on ? "enable" : "disable", error);
99 return error;
100 }
101
102 return 0;
103}
104
81static int max77693_haptic_configure(struct max77693_haptic *haptic, 105static int max77693_haptic_configure(struct max77693_haptic *haptic,
82 bool enable) 106 bool enable)
83{ 107{
84 unsigned int value; 108 unsigned int value, config_reg;
85 int error; 109 int error;
86 110
87 value = ((haptic->type << MAX77693_CONFIG2_MODE) | 111 switch (haptic->dev_type) {
88 (enable << MAX77693_CONFIG2_MEN) | 112 case TYPE_MAX77693:
89 (haptic->mode << MAX77693_CONFIG2_HTYP) | 113 value = ((haptic->type << MAX77693_CONFIG2_MODE) |
90 (haptic->pwm_divisor)); 114 (enable << MAX77693_CONFIG2_MEN) |
115 (haptic->mode << MAX77693_CONFIG2_HTYP) |
116 MAX77693_HAPTIC_PWM_DIVISOR_128);
117 config_reg = MAX77693_HAPTIC_REG_CONFIG2;
118 break;
119 case TYPE_MAX77843:
120 value = (haptic->type << MCONFIG_MODE_SHIFT) |
121 (enable << MCONFIG_MEN_SHIFT) |
122 MAX77693_HAPTIC_PWM_DIVISOR_128;
123 config_reg = MAX77843_HAP_REG_MCONFIG;
124 break;
125 default:
126 return -EINVAL;
127 }
91 128
92 error = regmap_write(haptic->regmap_haptic, 129 error = regmap_write(haptic->regmap_haptic,
93 MAX77693_HAPTIC_REG_CONFIG2, value); 130 config_reg, value);
94 if (error) { 131 if (error) {
95 dev_err(haptic->dev, 132 dev_err(haptic->dev,
96 "failed to update haptic config: %d\n", error); 133 "failed to update haptic config: %d\n", error);
@@ -104,6 +141,9 @@ static int max77693_haptic_lowsys(struct max77693_haptic *haptic, bool enable)
104{ 141{
105 int error; 142 int error;
106 143
144 if (haptic->dev_type != TYPE_MAX77693)
145 return 0;
146
107 error = regmap_update_bits(haptic->regmap_pmic, 147 error = regmap_update_bits(haptic->regmap_pmic,
108 MAX77693_PMIC_REG_LSCNFG, 148 MAX77693_PMIC_REG_LSCNFG,
109 MAX77693_PMIC_LOW_SYS_MASK, 149 MAX77693_PMIC_LOW_SYS_MASK,
@@ -219,6 +259,10 @@ static int max77693_haptic_open(struct input_dev *dev)
219 struct max77693_haptic *haptic = input_get_drvdata(dev); 259 struct max77693_haptic *haptic = input_get_drvdata(dev);
220 int error; 260 int error;
221 261
262 error = max77843_haptic_bias(haptic, true);
263 if (error)
264 return error;
265
222 error = regulator_enable(haptic->motor_reg); 266 error = regulator_enable(haptic->motor_reg);
223 if (error) { 267 if (error) {
224 dev_err(haptic->dev, 268 dev_err(haptic->dev,
@@ -241,6 +285,8 @@ static void max77693_haptic_close(struct input_dev *dev)
241 if (error) 285 if (error)
242 dev_err(haptic->dev, 286 dev_err(haptic->dev,
243 "failed to disable regulator: %d\n", error); 287 "failed to disable regulator: %d\n", error);
288
289 max77843_haptic_bias(haptic, false);
244} 290}
245 291
246static int max77693_haptic_probe(struct platform_device *pdev) 292static int max77693_haptic_probe(struct platform_device *pdev)
@@ -254,13 +300,26 @@ static int max77693_haptic_probe(struct platform_device *pdev)
254 return -ENOMEM; 300 return -ENOMEM;
255 301
256 haptic->regmap_pmic = max77693->regmap; 302 haptic->regmap_pmic = max77693->regmap;
257 haptic->regmap_haptic = max77693->regmap_haptic;
258 haptic->dev = &pdev->dev; 303 haptic->dev = &pdev->dev;
259 haptic->type = MAX77693_HAPTIC_LRA; 304 haptic->type = MAX77693_HAPTIC_LRA;
260 haptic->mode = MAX77693_HAPTIC_EXTERNAL_MODE; 305 haptic->mode = MAX77693_HAPTIC_EXTERNAL_MODE;
261 haptic->pwm_divisor = MAX77693_HAPTIC_PWM_DIVISOR_128;
262 haptic->suspend_state = false; 306 haptic->suspend_state = false;
263 307
308 /* Variant-specific init */
309 haptic->dev_type = platform_get_device_id(pdev)->driver_data;
310 switch (haptic->dev_type) {
311 case TYPE_MAX77693:
312 haptic->regmap_haptic = max77693->regmap_haptic;
313 break;
314 case TYPE_MAX77843:
315 haptic->regmap_haptic = max77693->regmap;
316 break;
317 default:
318 dev_err(&pdev->dev, "unsupported device type: %u\n",
319 haptic->dev_type);
320 return -EINVAL;
321 }
322
264 INIT_WORK(&haptic->work, max77693_haptic_play_work); 323 INIT_WORK(&haptic->work, max77693_haptic_play_work);
265 324
266	/* Get pwm and regulator for haptic device */ 325	/* Get pwm and regulator for haptic device */
@@ -338,16 +397,25 @@ static int __maybe_unused max77693_haptic_resume(struct device *dev)
338static SIMPLE_DEV_PM_OPS(max77693_haptic_pm_ops, 397static SIMPLE_DEV_PM_OPS(max77693_haptic_pm_ops,
339 max77693_haptic_suspend, max77693_haptic_resume); 398 max77693_haptic_suspend, max77693_haptic_resume);
340 399
400static const struct platform_device_id max77693_haptic_id[] = {
401 { "max77693-haptic", TYPE_MAX77693 },
402 { "max77843-haptic", TYPE_MAX77843 },
403 {},
404};
405MODULE_DEVICE_TABLE(platform, max77693_haptic_id);
406
341static struct platform_driver max77693_haptic_driver = { 407static struct platform_driver max77693_haptic_driver = {
342 .driver = { 408 .driver = {
343 .name = "max77693-haptic", 409 .name = "max77693-haptic",
344 .pm = &max77693_haptic_pm_ops, 410 .pm = &max77693_haptic_pm_ops,
345 }, 411 },
346 .probe = max77693_haptic_probe, 412 .probe = max77693_haptic_probe,
413 .id_table = max77693_haptic_id,
347}; 414};
348module_platform_driver(max77693_haptic_driver); 415module_platform_driver(max77693_haptic_driver);
349 416
350MODULE_AUTHOR("Jaewon Kim <jaewon02.kim@samsung.com>"); 417MODULE_AUTHOR("Jaewon Kim <jaewon02.kim@samsung.com>");
351MODULE_DESCRIPTION("MAXIM MAX77693 Haptic driver"); 418MODULE_AUTHOR("Krzysztof Kozlowski <k.kozlowski@samsung.com>");
419MODULE_DESCRIPTION("MAXIM 77693/77843 Haptic driver");
352MODULE_ALIAS("platform:max77693-haptic"); 420MODULE_ALIAS("platform:max77693-haptic");
353MODULE_LICENSE("GPL"); 421MODULE_LICENSE("GPL");
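
The consolidated driver above selects the config register and bit layout per variant in max77693_haptic_configure(). A rough standalone sketch of that value packing follows, with illustrative shift and divisor values (the real MAX77693_CONFIG2_* and MCONFIG_* constants live in the mfd headers and are not part of this diff):

#include <stdio.h>

/* Illustrative bit positions only -- the real values come from
 * <linux/mfd/max77693-private.h> and <linux/mfd/max77843-private.h>. */
#define CFG_MODE_SHIFT   7   /* assumed: motor type (ERM/LRA) */
#define CFG_MEN_SHIFT    6   /* assumed: motor enable bit */
#define CFG_HTYP_SHIFT   5   /* assumed: pulse mode (internal/external) */
#define PWM_DIVISOR_128  2   /* assumed encoding of the /128 divisor */

/* Pack a config value the way max77693_haptic_configure() does: type,
 * enable and (for MAX77693 only) pulse mode are OR'ed together with a
 * fixed PWM divisor of 128. */
static unsigned int pack_config(int is_max77843, unsigned int type,
                                unsigned int enable, unsigned int mode)
{
        if (is_max77843)
                return (type << CFG_MODE_SHIFT) |
                       (enable << CFG_MEN_SHIFT) |
                       PWM_DIVISOR_128;

        return (type << CFG_MODE_SHIFT) |
               (enable << CFG_MEN_SHIFT) |
               (mode << CFG_HTYP_SHIFT) |
               PWM_DIVISOR_128;
}

int main(void)
{
        /* LRA motor, enabled, external PWM mode */
        printf("max77693 config: 0x%02x\n", pack_config(0, 1, 1, 1));
        printf("max77843 config: 0x%02x\n", pack_config(1, 1, 1, 1));
        return 0;
}
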
diff --git a/drivers/input/misc/max77843-haptic.c b/drivers/input/misc/max77843-haptic.c
deleted file mode 100644
index dccbb465a055..000000000000
--- a/drivers/input/misc/max77843-haptic.c
+++ /dev/null
@@ -1,358 +0,0 @@
1/*
2 * MAXIM MAX77693 Haptic device driver
3 *
4 * Copyright (C) 2015 Samsung Electronics
5 * Author: Jaewon Kim <jaewon02.kim@samsung.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 */
12
13#include <linux/err.h>
14#include <linux/i2c.h>
15#include <linux/init.h>
16#include <linux/input.h>
17#include <linux/mfd/max77843-private.h>
18#include <linux/module.h>
19#include <linux/platform_device.h>
20#include <linux/pwm.h>
21#include <linux/regmap.h>
22#include <linux/regulator/consumer.h>
23#include <linux/slab.h>
24#include <linux/workqueue.h>
25
26#define MAX_MAGNITUDE_SHIFT 16
27
28enum max77843_haptic_motor_type {
29 MAX77843_HAPTIC_ERM = 0,
30 MAX77843_HAPTIC_LRA,
31};
32
33enum max77843_haptic_pwm_divisor {
34 MAX77843_HAPTIC_PWM_DIVISOR_32 = 0,
35 MAX77843_HAPTIC_PWM_DIVISOR_64,
36 MAX77843_HAPTIC_PWM_DIVISOR_128,
37 MAX77843_HAPTIC_PWM_DIVISOR_256,
38};
39
40struct max77843_haptic {
41 struct regmap *regmap_haptic;
42 struct device *dev;
43 struct input_dev *input_dev;
44 struct pwm_device *pwm_dev;
45 struct regulator *motor_reg;
46 struct work_struct work;
47 struct mutex mutex;
48
49 unsigned int magnitude;
50 unsigned int pwm_duty;
51
52 bool active;
53 bool suspended;
54
55 enum max77843_haptic_motor_type type;
56 enum max77843_haptic_pwm_divisor pwm_divisor;
57};
58
59static int max77843_haptic_set_duty_cycle(struct max77843_haptic *haptic)
60{
61 int delta = (haptic->pwm_dev->period + haptic->pwm_duty) / 2;
62 int error;
63
64 error = pwm_config(haptic->pwm_dev, delta, haptic->pwm_dev->period);
65 if (error) {
66 dev_err(haptic->dev, "failed to configure pwm: %d\n", error);
67 return error;
68 }
69
70 return 0;
71}
72
73static int max77843_haptic_bias(struct max77843_haptic *haptic, bool on)
74{
75 int error;
76
77 error = regmap_update_bits(haptic->regmap_haptic,
78 MAX77843_SYS_REG_MAINCTRL1,
79 MAX77843_MAINCTRL1_BIASEN_MASK,
80 on << MAINCTRL1_BIASEN_SHIFT);
81 if (error) {
82 dev_err(haptic->dev, "failed to %s bias: %d\n",
83 on ? "enable" : "disable", error);
84 return error;
85 }
86
87 return 0;
88}
89
90static int max77843_haptic_config(struct max77843_haptic *haptic, bool enable)
91{
92 unsigned int value;
93 int error;
94
95 value = (haptic->type << MCONFIG_MODE_SHIFT) |
96 (enable << MCONFIG_MEN_SHIFT) |
97 (haptic->pwm_divisor << MCONFIG_PDIV_SHIFT);
98
99 error = regmap_write(haptic->regmap_haptic,
100 MAX77843_HAP_REG_MCONFIG, value);
101 if (error) {
102 dev_err(haptic->dev,
103 "failed to update haptic config: %d\n", error);
104 return error;
105 }
106
107 return 0;
108}
109
110static int max77843_haptic_enable(struct max77843_haptic *haptic)
111{
112 int error;
113
114 if (haptic->active)
115 return 0;
116
117 error = pwm_enable(haptic->pwm_dev);
118 if (error) {
119 dev_err(haptic->dev,
120 "failed to enable pwm device: %d\n", error);
121 return error;
122 }
123
124 error = max77843_haptic_config(haptic, true);
125 if (error)
126 goto err_config;
127
128 haptic->active = true;
129
130 return 0;
131
132err_config:
133 pwm_disable(haptic->pwm_dev);
134
135 return error;
136}
137
138static int max77843_haptic_disable(struct max77843_haptic *haptic)
139{
140 int error;
141
142 if (!haptic->active)
143 return 0;
144
145 error = max77843_haptic_config(haptic, false);
146 if (error)
147 return error;
148
149 pwm_disable(haptic->pwm_dev);
150
151 haptic->active = false;
152
153 return 0;
154}
155
156static void max77843_haptic_play_work(struct work_struct *work)
157{
158 struct max77843_haptic *haptic =
159 container_of(work, struct max77843_haptic, work);
160 int error;
161
162 mutex_lock(&haptic->mutex);
163
164 if (haptic->suspended)
165 goto out_unlock;
166
167 if (haptic->magnitude) {
168 error = max77843_haptic_set_duty_cycle(haptic);
169 if (error) {
170 dev_err(haptic->dev,
171 "failed to set duty cycle: %d\n", error);
172 goto out_unlock;
173 }
174
175 error = max77843_haptic_enable(haptic);
176 if (error)
177 dev_err(haptic->dev,
178 "cannot enable haptic: %d\n", error);
179 } else {
180 error = max77843_haptic_disable(haptic);
181 if (error)
182 dev_err(haptic->dev,
183 "cannot disable haptic: %d\n", error);
184 }
185
186out_unlock:
187 mutex_unlock(&haptic->mutex);
188}
189
190static int max77843_haptic_play_effect(struct input_dev *dev, void *data,
191 struct ff_effect *effect)
192{
193 struct max77843_haptic *haptic = input_get_drvdata(dev);
194 u64 period_mag_multi;
195
196 haptic->magnitude = effect->u.rumble.strong_magnitude;
197 if (!haptic->magnitude)
198 haptic->magnitude = effect->u.rumble.weak_magnitude;
199
200 period_mag_multi = (u64)haptic->pwm_dev->period * haptic->magnitude;
201 haptic->pwm_duty = (unsigned int)(period_mag_multi >>
202 MAX_MAGNITUDE_SHIFT);
203
204 schedule_work(&haptic->work);
205
206 return 0;
207}
208
209static int max77843_haptic_open(struct input_dev *dev)
210{
211 struct max77843_haptic *haptic = input_get_drvdata(dev);
212 int error;
213
214 error = max77843_haptic_bias(haptic, true);
215 if (error)
216 return error;
217
218 error = regulator_enable(haptic->motor_reg);
219 if (error) {
220 dev_err(haptic->dev,
221 "failed to enable regulator: %d\n", error);
222 return error;
223 }
224
225 return 0;
226}
227
228static void max77843_haptic_close(struct input_dev *dev)
229{
230 struct max77843_haptic *haptic = input_get_drvdata(dev);
231 int error;
232
233 cancel_work_sync(&haptic->work);
234 max77843_haptic_disable(haptic);
235
236 error = regulator_disable(haptic->motor_reg);
237 if (error)
238 dev_err(haptic->dev,
239 "failed to disable regulator: %d\n", error);
240
241 max77843_haptic_bias(haptic, false);
242}
243
244static int max77843_haptic_probe(struct platform_device *pdev)
245{
246 struct max77843 *max77843 = dev_get_drvdata(pdev->dev.parent);
247 struct max77843_haptic *haptic;
248 int error;
249
250 haptic = devm_kzalloc(&pdev->dev, sizeof(*haptic), GFP_KERNEL);
251 if (!haptic)
252 return -ENOMEM;
253
254 haptic->regmap_haptic = max77843->regmap;
255 haptic->dev = &pdev->dev;
256 haptic->type = MAX77843_HAPTIC_LRA;
257 haptic->pwm_divisor = MAX77843_HAPTIC_PWM_DIVISOR_128;
258
259 INIT_WORK(&haptic->work, max77843_haptic_play_work);
260 mutex_init(&haptic->mutex);
261
262 haptic->pwm_dev = devm_pwm_get(&pdev->dev, NULL);
263 if (IS_ERR(haptic->pwm_dev)) {
264 dev_err(&pdev->dev, "failed to get pwm device\n");
265 return PTR_ERR(haptic->pwm_dev);
266 }
267
268 haptic->motor_reg = devm_regulator_get_exclusive(&pdev->dev, "haptic");
269 if (IS_ERR(haptic->motor_reg)) {
270 dev_err(&pdev->dev, "failed to get regulator\n");
271 return PTR_ERR(haptic->motor_reg);
272 }
273
274 haptic->input_dev = devm_input_allocate_device(&pdev->dev);
275 if (!haptic->input_dev) {
276 dev_err(&pdev->dev, "failed to allocate input device\n");
277 return -ENOMEM;
278 }
279
280 haptic->input_dev->name = "max77843-haptic";
281 haptic->input_dev->id.version = 1;
282 haptic->input_dev->dev.parent = &pdev->dev;
283 haptic->input_dev->open = max77843_haptic_open;
284 haptic->input_dev->close = max77843_haptic_close;
285 input_set_drvdata(haptic->input_dev, haptic);
286 input_set_capability(haptic->input_dev, EV_FF, FF_RUMBLE);
287
288 error = input_ff_create_memless(haptic->input_dev, NULL,
289 max77843_haptic_play_effect);
290 if (error) {
291 dev_err(&pdev->dev, "failed to create force-feedback\n");
292 return error;
293 }
294
295 error = input_register_device(haptic->input_dev);
296 if (error) {
297 dev_err(&pdev->dev, "failed to register input device\n");
298 return error;
299 }
300
301 platform_set_drvdata(pdev, haptic);
302
303 return 0;
304}
305
306static int __maybe_unused max77843_haptic_suspend(struct device *dev)
307{
308 struct platform_device *pdev = to_platform_device(dev);
309 struct max77843_haptic *haptic = platform_get_drvdata(pdev);
310 int error;
311
312 error = mutex_lock_interruptible(&haptic->mutex);
313 if (error)
314 return error;
315
316 max77843_haptic_disable(haptic);
317
318 haptic->suspended = true;
319
320 mutex_unlock(&haptic->mutex);
321
322 return 0;
323}
324
325static int __maybe_unused max77843_haptic_resume(struct device *dev)
326{
327 struct platform_device *pdev = to_platform_device(dev);
328 struct max77843_haptic *haptic = platform_get_drvdata(pdev);
329 unsigned int magnitude;
330
331 mutex_lock(&haptic->mutex);
332
333 haptic->suspended = false;
334
335 magnitude = ACCESS_ONCE(haptic->magnitude);
336 if (magnitude)
337 max77843_haptic_enable(haptic);
338
339 mutex_unlock(&haptic->mutex);
340
341 return 0;
342}
343
344static SIMPLE_DEV_PM_OPS(max77843_haptic_pm_ops,
345 max77843_haptic_suspend, max77843_haptic_resume);
346
347static struct platform_driver max77843_haptic_driver = {
348 .driver = {
349 .name = "max77843-haptic",
350 .pm = &max77843_haptic_pm_ops,
351 },
352 .probe = max77843_haptic_probe,
353};
354module_platform_driver(max77843_haptic_driver);
355
356MODULE_AUTHOR("Jaewon Kim <jaewon02.kim@samsung.com>");
357MODULE_DESCRIPTION("MAXIM MAX77843 Haptic driver");
358MODULE_LICENSE("GPL");
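
Both the removed max77843 driver above and the consolidated max77693 driver convert the 16-bit rumble magnitude into a PWM duty cycle with the same fixed-point scaling. A minimal sketch of that arithmetic, using an arbitrary example period:

#include <stdio.h>
#include <stdint.h>

#define MAX_MAGNITUDE_SHIFT 16

/* Scale a 0..0xffff rumble magnitude onto the PWM period, as
 * max77843_haptic_play_effect() did and the merged driver still does. */
static unsigned int magnitude_to_duty(unsigned int period_ns,
                                      unsigned int magnitude)
{
        uint64_t period_mag_multi = (uint64_t)period_ns * magnitude;

        return (unsigned int)(period_mag_multi >> MAX_MAGNITUDE_SHIFT);
}

int main(void)
{
        unsigned int period = 50000;            /* example PWM period in ns */
        unsigned int duty = magnitude_to_duty(period, 0x8000);

        /* the set_duty_cycle() helpers then program (period + duty) / 2 */
        printf("duty=%u, programmed=%u\n", duty, (period + duty) / 2);
        return 0;
}
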
diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c
index fc17b9592f54..10c4e3d462f1 100644
--- a/drivers/input/misc/twl4030-vibra.c
+++ b/drivers/input/misc/twl4030-vibra.c
@@ -183,7 +183,8 @@ static bool twl4030_vibra_check_coexist(struct twl4030_vibra_data *pdata,
 	if (pdata && pdata->coexist)
 		return true;
 
-	if (of_find_node_by_name(node, "codec")) {
+	node = of_find_node_by_name(node, "codec");
+	if (node) {
 		of_node_put(node);
 		return true;
 	}
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 113d6f1516a5..4d246861d692 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -20,6 +20,7 @@
20#include <linux/input/mt.h> 20#include <linux/input/mt.h>
21#include <linux/serio.h> 21#include <linux/serio.h>
22#include <linux/libps2.h> 22#include <linux/libps2.h>
23#include <linux/dmi.h>
23 24
24#include "psmouse.h" 25#include "psmouse.h"
25#include "alps.h" 26#include "alps.h"
@@ -99,6 +100,7 @@ static const struct alps_nibble_commands alps_v6_nibble_commands[] = {
99#define ALPS_FOUR_BUTTONS 0x40 /* 4 direction button present */ 100#define ALPS_FOUR_BUTTONS 0x40 /* 4 direction button present */
100#define ALPS_PS2_INTERLEAVED 0x80 /* 3-byte PS/2 packet interleaved with 101#define ALPS_PS2_INTERLEAVED 0x80 /* 3-byte PS/2 packet interleaved with
101 6-byte ALPS packet */ 102 6-byte ALPS packet */
103#define ALPS_DELL 0x100 /* device is a Dell laptop */
102#define ALPS_BUTTONPAD 0x200 /* device is a clickpad */ 104#define ALPS_BUTTONPAD 0x200 /* device is a clickpad */
103 105
104static const struct alps_model_info alps_model_data[] = { 106static const struct alps_model_info alps_model_data[] = {
@@ -251,9 +253,9 @@ static void alps_process_packet_v1_v2(struct psmouse *psmouse)
251 return; 253 return;
252 } 254 }
253 255
254 /* Non interleaved V2 dualpoint has separate stick button bits */ 256 /* Dell non interleaved V2 dualpoint has separate stick button bits */
255 if (priv->proto_version == ALPS_PROTO_V2 && 257 if (priv->proto_version == ALPS_PROTO_V2 &&
256 priv->flags == (ALPS_PASS | ALPS_DUALPOINT)) { 258 priv->flags == (ALPS_DELL | ALPS_PASS | ALPS_DUALPOINT)) {
257 left |= packet[0] & 1; 259 left |= packet[0] & 1;
258 right |= packet[0] & 2; 260 right |= packet[0] & 2;
259 middle |= packet[0] & 4; 261 middle |= packet[0] & 4;
@@ -2550,6 +2552,8 @@ static int alps_set_protocol(struct psmouse *psmouse,
2550 priv->byte0 = protocol->byte0; 2552 priv->byte0 = protocol->byte0;
2551 priv->mask0 = protocol->mask0; 2553 priv->mask0 = protocol->mask0;
2552 priv->flags = protocol->flags; 2554 priv->flags = protocol->flags;
2555 if (dmi_name_in_vendors("Dell"))
2556 priv->flags |= ALPS_DELL;
2553 2557
2554 priv->x_max = 2000; 2558 priv->x_max = 2000;
2555 priv->y_max = 1400; 2559 priv->y_max = 1400;
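
The stick-button decode in alps_process_packet_v1_v2() now requires the exact combination ALPS_DELL | ALPS_PASS | ALPS_DUALPOINT; because priv->flags is compared with ==, the DMI-driven ALPS_DELL bit is what gates the quirk. A small sketch of that check (ALPS_DELL is taken from the patch; the other two flag values are assumptions for illustration):

#include <stdio.h>

#define ALPS_DUALPOINT 0x02   /* assumed value; the real one is in alps.h */
#define ALPS_PASS      0x04   /* assumed value; the real one is in alps.h */
#define ALPS_DELL      0x100  /* from this patch */

/* The V2 stick-button path only runs when the flags match exactly, so a
 * non-Dell dualpoint (without ALPS_DELL) no longer takes it. */
static int takes_dell_stick_path(unsigned int flags)
{
        return flags == (ALPS_DELL | ALPS_PASS | ALPS_DUALPOINT);
}

int main(void)
{
        printf("Dell dualpoint:     %d\n",
               takes_dell_stick_path(ALPS_DELL | ALPS_PASS | ALPS_DUALPOINT));
        printf("non-Dell dualpoint: %d\n",
               takes_dell_stick_path(ALPS_PASS | ALPS_DUALPOINT));
        return 0;
}
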
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
index b10709f04615..30e3442518f8 100644
--- a/drivers/input/mouse/bcm5974.c
+++ b/drivers/input/mouse/bcm5974.c
@@ -2,6 +2,7 @@
2 * Apple USB BCM5974 (Macbook Air and Penryn Macbook Pro) multitouch driver 2 * Apple USB BCM5974 (Macbook Air and Penryn Macbook Pro) multitouch driver
3 * 3 *
4 * Copyright (C) 2008 Henrik Rydberg (rydberg@euromail.se) 4 * Copyright (C) 2008 Henrik Rydberg (rydberg@euromail.se)
5 * Copyright (C) 2015 John Horan (knasher@gmail.com)
5 * 6 *
6 * The USB initialization and package decoding was made by 7 * The USB initialization and package decoding was made by
7 * Scott Shawcroft as part of the touchd user-space driver project: 8 * Scott Shawcroft as part of the touchd user-space driver project:
@@ -91,6 +92,10 @@
91#define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290 92#define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290
92#define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291 93#define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291
93#define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292 94#define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292
95/* MacbookPro12,1 (2015) */
96#define USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI 0x0272
97#define USB_DEVICE_ID_APPLE_WELLSPRING9_ISO 0x0273
98#define USB_DEVICE_ID_APPLE_WELLSPRING9_JIS 0x0274
94 99
95#define BCM5974_DEVICE(prod) { \ 100#define BCM5974_DEVICE(prod) { \
96 .match_flags = (USB_DEVICE_ID_MATCH_DEVICE | \ 101 .match_flags = (USB_DEVICE_ID_MATCH_DEVICE | \
@@ -152,6 +157,10 @@ static const struct usb_device_id bcm5974_table[] = {
152 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI), 157 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI),
153 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING8_ISO), 158 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING8_ISO),
154 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING8_JIS), 159 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING8_JIS),
160 /* MacbookPro12,1 */
161 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI),
162 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING9_ISO),
163 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING9_JIS),
155 /* Terminating entry */ 164 /* Terminating entry */
156 {} 165 {}
157}; 166};
@@ -180,21 +189,47 @@ struct bt_data {
180enum tp_type { 189enum tp_type {
181 TYPE1, /* plain trackpad */ 190 TYPE1, /* plain trackpad */
182 TYPE2, /* button integrated in trackpad */ 191 TYPE2, /* button integrated in trackpad */
183 TYPE3 /* additional header fields since June 2013 */ 192 TYPE3, /* additional header fields since June 2013 */
193 TYPE4 /* additional header field for pressure data */
184}; 194};
185 195
186/* trackpad finger data offsets, le16-aligned */ 196/* trackpad finger data offsets, le16-aligned */
187#define FINGER_TYPE1 (13 * sizeof(__le16)) 197#define HEADER_TYPE1 (13 * sizeof(__le16))
188#define FINGER_TYPE2 (15 * sizeof(__le16)) 198#define HEADER_TYPE2 (15 * sizeof(__le16))
189#define FINGER_TYPE3 (19 * sizeof(__le16)) 199#define HEADER_TYPE3 (19 * sizeof(__le16))
200#define HEADER_TYPE4 (23 * sizeof(__le16))
190 201
191/* trackpad button data offsets */ 202/* trackpad button data offsets */
203#define BUTTON_TYPE1 0
192#define BUTTON_TYPE2 15 204#define BUTTON_TYPE2 15
193#define BUTTON_TYPE3 23 205#define BUTTON_TYPE3 23
206#define BUTTON_TYPE4 31
194 207
195/* list of device capability bits */ 208/* list of device capability bits */
196#define HAS_INTEGRATED_BUTTON 1 209#define HAS_INTEGRATED_BUTTON 1
197 210
211/* trackpad finger data block size */
212#define FSIZE_TYPE1 (14 * sizeof(__le16))
213#define FSIZE_TYPE2 (14 * sizeof(__le16))
214#define FSIZE_TYPE3 (14 * sizeof(__le16))
215#define FSIZE_TYPE4 (15 * sizeof(__le16))
216
217/* offset from header to finger struct */
218#define DELTA_TYPE1 (0 * sizeof(__le16))
219#define DELTA_TYPE2 (0 * sizeof(__le16))
220#define DELTA_TYPE3 (0 * sizeof(__le16))
221#define DELTA_TYPE4 (1 * sizeof(__le16))
222
223/* usb control message mode switch data */
224#define USBMSG_TYPE1 8, 0x300, 0, 0, 0x1, 0x8
225#define USBMSG_TYPE2 8, 0x300, 0, 0, 0x1, 0x8
226#define USBMSG_TYPE3 8, 0x300, 0, 0, 0x1, 0x8
227#define USBMSG_TYPE4 2, 0x302, 2, 1, 0x1, 0x0
228
229/* Wellspring initialization constants */
230#define BCM5974_WELLSPRING_MODE_READ_REQUEST_ID 1
231#define BCM5974_WELLSPRING_MODE_WRITE_REQUEST_ID 9
232
198/* trackpad finger structure, le16-aligned */ 233/* trackpad finger structure, le16-aligned */
199struct tp_finger { 234struct tp_finger {
200 __le16 origin; /* zero when switching track finger */ 235 __le16 origin; /* zero when switching track finger */
@@ -207,14 +242,13 @@ struct tp_finger {
207 __le16 orientation; /* 16384 when point, else 15 bit angle */ 242 __le16 orientation; /* 16384 when point, else 15 bit angle */
208 __le16 touch_major; /* touch area, major axis */ 243 __le16 touch_major; /* touch area, major axis */
209 __le16 touch_minor; /* touch area, minor axis */ 244 __le16 touch_minor; /* touch area, minor axis */
210 __le16 unused[3]; /* zeros */ 245 __le16 unused[2]; /* zeros */
246 __le16 pressure; /* pressure on forcetouch touchpad */
211 __le16 multi; /* one finger: varies, more fingers: constant */ 247 __le16 multi; /* one finger: varies, more fingers: constant */
212} __attribute__((packed,aligned(2))); 248} __attribute__((packed,aligned(2)));
213 249
214/* trackpad finger data size, empirically at least ten fingers */ 250/* trackpad finger data size, empirically at least ten fingers */
215#define MAX_FINGERS 16 251#define MAX_FINGERS 16
216#define SIZEOF_FINGER sizeof(struct tp_finger)
217#define SIZEOF_ALL_FINGERS (MAX_FINGERS * SIZEOF_FINGER)
218#define MAX_FINGER_ORIENTATION 16384 252#define MAX_FINGER_ORIENTATION 16384
219 253
220/* device-specific parameters */ 254/* device-specific parameters */
@@ -232,8 +266,17 @@ struct bcm5974_config {
232 int bt_datalen; /* data length of the button interface */ 266 int bt_datalen; /* data length of the button interface */
233 int tp_ep; /* the endpoint of the trackpad interface */ 267 int tp_ep; /* the endpoint of the trackpad interface */
234 enum tp_type tp_type; /* type of trackpad interface */ 268 enum tp_type tp_type; /* type of trackpad interface */
235 int tp_offset; /* offset to trackpad finger data */ 269 int tp_header; /* bytes in header block */
236 int tp_datalen; /* data length of the trackpad interface */ 270 int tp_datalen; /* data length of the trackpad interface */
271 int tp_button; /* offset to button data */
272 int tp_fsize; /* bytes in single finger block */
273 int tp_delta; /* offset from header to finger struct */
274 int um_size; /* usb control message length */
275 int um_req_val; /* usb control message value */
276 int um_req_idx; /* usb control message index */
277 int um_switch_idx; /* usb control message mode switch index */
278 int um_switch_on; /* usb control message mode switch on */
279 int um_switch_off; /* usb control message mode switch off */
237 struct bcm5974_param p; /* finger pressure limits */ 280 struct bcm5974_param p; /* finger pressure limits */
238 struct bcm5974_param w; /* finger width limits */ 281 struct bcm5974_param w; /* finger width limits */
239 struct bcm5974_param x; /* horizontal limits */ 282 struct bcm5974_param x; /* horizontal limits */
@@ -259,6 +302,24 @@ struct bcm5974 {
259 int slots[MAX_FINGERS]; /* slot assignments */ 302 int slots[MAX_FINGERS]; /* slot assignments */
260}; 303};
261 304
305/* trackpad finger block data, le16-aligned */
306static const struct tp_finger *get_tp_finger(const struct bcm5974 *dev, int i)
307{
308 const struct bcm5974_config *c = &dev->cfg;
309 u8 *f_base = dev->tp_data + c->tp_header + c->tp_delta;
310
311 return (const struct tp_finger *)(f_base + i * c->tp_fsize);
312}
313
314#define DATAFORMAT(type) \
315 type, \
316 HEADER_##type, \
317 HEADER_##type + (MAX_FINGERS) * (FSIZE_##type), \
318 BUTTON_##type, \
319 FSIZE_##type, \
320 DELTA_##type, \
321 USBMSG_##type
322
262/* logical signal quality */ 323/* logical signal quality */
263#define SN_PRESSURE 45 /* pressure signal-to-noise ratio */ 324#define SN_PRESSURE 45 /* pressure signal-to-noise ratio */
264#define SN_WIDTH 25 /* width signal-to-noise ratio */ 325#define SN_WIDTH 25 /* width signal-to-noise ratio */
@@ -273,7 +334,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
273 USB_DEVICE_ID_APPLE_WELLSPRING_JIS, 334 USB_DEVICE_ID_APPLE_WELLSPRING_JIS,
274 0, 335 0,
275 0x84, sizeof(struct bt_data), 336 0x84, sizeof(struct bt_data),
276 0x81, TYPE1, FINGER_TYPE1, FINGER_TYPE1 + SIZEOF_ALL_FINGERS, 337 0x81, DATAFORMAT(TYPE1),
277 { SN_PRESSURE, 0, 256 }, 338 { SN_PRESSURE, 0, 256 },
278 { SN_WIDTH, 0, 2048 }, 339 { SN_WIDTH, 0, 2048 },
279 { SN_COORD, -4824, 5342 }, 340 { SN_COORD, -4824, 5342 },
@@ -286,7 +347,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
286 USB_DEVICE_ID_APPLE_WELLSPRING2_JIS, 347 USB_DEVICE_ID_APPLE_WELLSPRING2_JIS,
287 0, 348 0,
288 0x84, sizeof(struct bt_data), 349 0x84, sizeof(struct bt_data),
289 0x81, TYPE1, FINGER_TYPE1, FINGER_TYPE1 + SIZEOF_ALL_FINGERS, 350 0x81, DATAFORMAT(TYPE1),
290 { SN_PRESSURE, 0, 256 }, 351 { SN_PRESSURE, 0, 256 },
291 { SN_WIDTH, 0, 2048 }, 352 { SN_WIDTH, 0, 2048 },
292 { SN_COORD, -4824, 4824 }, 353 { SN_COORD, -4824, 4824 },
@@ -299,7 +360,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
299 USB_DEVICE_ID_APPLE_WELLSPRING3_JIS, 360 USB_DEVICE_ID_APPLE_WELLSPRING3_JIS,
300 HAS_INTEGRATED_BUTTON, 361 HAS_INTEGRATED_BUTTON,
301 0x84, sizeof(struct bt_data), 362 0x84, sizeof(struct bt_data),
302 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, 363 0x81, DATAFORMAT(TYPE2),
303 { SN_PRESSURE, 0, 300 }, 364 { SN_PRESSURE, 0, 300 },
304 { SN_WIDTH, 0, 2048 }, 365 { SN_WIDTH, 0, 2048 },
305 { SN_COORD, -4460, 5166 }, 366 { SN_COORD, -4460, 5166 },
@@ -312,7 +373,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
312 USB_DEVICE_ID_APPLE_WELLSPRING4_JIS, 373 USB_DEVICE_ID_APPLE_WELLSPRING4_JIS,
313 HAS_INTEGRATED_BUTTON, 374 HAS_INTEGRATED_BUTTON,
314 0x84, sizeof(struct bt_data), 375 0x84, sizeof(struct bt_data),
315 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, 376 0x81, DATAFORMAT(TYPE2),
316 { SN_PRESSURE, 0, 300 }, 377 { SN_PRESSURE, 0, 300 },
317 { SN_WIDTH, 0, 2048 }, 378 { SN_WIDTH, 0, 2048 },
318 { SN_COORD, -4620, 5140 }, 379 { SN_COORD, -4620, 5140 },
@@ -325,7 +386,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
325 USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS, 386 USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS,
326 HAS_INTEGRATED_BUTTON, 387 HAS_INTEGRATED_BUTTON,
327 0x84, sizeof(struct bt_data), 388 0x84, sizeof(struct bt_data),
328 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, 389 0x81, DATAFORMAT(TYPE2),
329 { SN_PRESSURE, 0, 300 }, 390 { SN_PRESSURE, 0, 300 },
330 { SN_WIDTH, 0, 2048 }, 391 { SN_WIDTH, 0, 2048 },
331 { SN_COORD, -4616, 5112 }, 392 { SN_COORD, -4616, 5112 },
@@ -338,7 +399,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
338 USB_DEVICE_ID_APPLE_WELLSPRING5_JIS, 399 USB_DEVICE_ID_APPLE_WELLSPRING5_JIS,
339 HAS_INTEGRATED_BUTTON, 400 HAS_INTEGRATED_BUTTON,
340 0x84, sizeof(struct bt_data), 401 0x84, sizeof(struct bt_data),
341 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, 402 0x81, DATAFORMAT(TYPE2),
342 { SN_PRESSURE, 0, 300 }, 403 { SN_PRESSURE, 0, 300 },
343 { SN_WIDTH, 0, 2048 }, 404 { SN_WIDTH, 0, 2048 },
344 { SN_COORD, -4415, 5050 }, 405 { SN_COORD, -4415, 5050 },
@@ -351,7 +412,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
351 USB_DEVICE_ID_APPLE_WELLSPRING6_JIS, 412 USB_DEVICE_ID_APPLE_WELLSPRING6_JIS,
352 HAS_INTEGRATED_BUTTON, 413 HAS_INTEGRATED_BUTTON,
353 0x84, sizeof(struct bt_data), 414 0x84, sizeof(struct bt_data),
354 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, 415 0x81, DATAFORMAT(TYPE2),
355 { SN_PRESSURE, 0, 300 }, 416 { SN_PRESSURE, 0, 300 },
356 { SN_WIDTH, 0, 2048 }, 417 { SN_WIDTH, 0, 2048 },
357 { SN_COORD, -4620, 5140 }, 418 { SN_COORD, -4620, 5140 },
@@ -364,7 +425,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
364 USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS, 425 USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS,
365 HAS_INTEGRATED_BUTTON, 426 HAS_INTEGRATED_BUTTON,
366 0x84, sizeof(struct bt_data), 427 0x84, sizeof(struct bt_data),
367 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, 428 0x81, DATAFORMAT(TYPE2),
368 { SN_PRESSURE, 0, 300 }, 429 { SN_PRESSURE, 0, 300 },
369 { SN_WIDTH, 0, 2048 }, 430 { SN_WIDTH, 0, 2048 },
370 { SN_COORD, -4750, 5280 }, 431 { SN_COORD, -4750, 5280 },
@@ -377,7 +438,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
377 USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS, 438 USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS,
378 HAS_INTEGRATED_BUTTON, 439 HAS_INTEGRATED_BUTTON,
379 0x84, sizeof(struct bt_data), 440 0x84, sizeof(struct bt_data),
380 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, 441 0x81, DATAFORMAT(TYPE2),
381 { SN_PRESSURE, 0, 300 }, 442 { SN_PRESSURE, 0, 300 },
382 { SN_WIDTH, 0, 2048 }, 443 { SN_WIDTH, 0, 2048 },
383 { SN_COORD, -4620, 5140 }, 444 { SN_COORD, -4620, 5140 },
@@ -390,7 +451,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
390 USB_DEVICE_ID_APPLE_WELLSPRING7_JIS, 451 USB_DEVICE_ID_APPLE_WELLSPRING7_JIS,
391 HAS_INTEGRATED_BUTTON, 452 HAS_INTEGRATED_BUTTON,
392 0x84, sizeof(struct bt_data), 453 0x84, sizeof(struct bt_data),
393 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, 454 0x81, DATAFORMAT(TYPE2),
394 { SN_PRESSURE, 0, 300 }, 455 { SN_PRESSURE, 0, 300 },
395 { SN_WIDTH, 0, 2048 }, 456 { SN_WIDTH, 0, 2048 },
396 { SN_COORD, -4750, 5280 }, 457 { SN_COORD, -4750, 5280 },
@@ -403,7 +464,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
403 USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS, 464 USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS,
404 HAS_INTEGRATED_BUTTON, 465 HAS_INTEGRATED_BUTTON,
405 0x84, sizeof(struct bt_data), 466 0x84, sizeof(struct bt_data),
406 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, 467 0x81, DATAFORMAT(TYPE2),
407 { SN_PRESSURE, 0, 300 }, 468 { SN_PRESSURE, 0, 300 },
408 { SN_WIDTH, 0, 2048 }, 469 { SN_WIDTH, 0, 2048 },
409 { SN_COORD, -4750, 5280 }, 470 { SN_COORD, -4750, 5280 },
@@ -416,13 +477,26 @@ static const struct bcm5974_config bcm5974_config_table[] = {
416 USB_DEVICE_ID_APPLE_WELLSPRING8_JIS, 477 USB_DEVICE_ID_APPLE_WELLSPRING8_JIS,
417 HAS_INTEGRATED_BUTTON, 478 HAS_INTEGRATED_BUTTON,
418 0, sizeof(struct bt_data), 479 0, sizeof(struct bt_data),
419 0x83, TYPE3, FINGER_TYPE3, FINGER_TYPE3 + SIZEOF_ALL_FINGERS, 480 0x83, DATAFORMAT(TYPE3),
420 { SN_PRESSURE, 0, 300 }, 481 { SN_PRESSURE, 0, 300 },
421 { SN_WIDTH, 0, 2048 }, 482 { SN_WIDTH, 0, 2048 },
422 { SN_COORD, -4620, 5140 }, 483 { SN_COORD, -4620, 5140 },
423 { SN_COORD, -150, 6600 }, 484 { SN_COORD, -150, 6600 },
424 { SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION } 485 { SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION }
425 }, 486 },
487 {
488 USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI,
489 USB_DEVICE_ID_APPLE_WELLSPRING9_ISO,
490 USB_DEVICE_ID_APPLE_WELLSPRING9_JIS,
491 HAS_INTEGRATED_BUTTON,
492 0, sizeof(struct bt_data),
493 0x83, DATAFORMAT(TYPE4),
494 { SN_PRESSURE, 0, 300 },
495 { SN_WIDTH, 0, 2048 },
496 { SN_COORD, -4828, 5345 },
497 { SN_COORD, -203, 6803 },
498 { SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION }
499 },
426 {} 500 {}
427}; 501};
428 502
@@ -549,19 +623,18 @@ static int report_tp_state(struct bcm5974 *dev, int size)
549 struct input_dev *input = dev->input; 623 struct input_dev *input = dev->input;
550 int raw_n, i, n = 0; 624 int raw_n, i, n = 0;
551 625
552 if (size < c->tp_offset || (size - c->tp_offset) % SIZEOF_FINGER != 0) 626 if (size < c->tp_header || (size - c->tp_header) % c->tp_fsize != 0)
553 return -EIO; 627 return -EIO;
554 628
555 /* finger data, le16-aligned */ 629 raw_n = (size - c->tp_header) / c->tp_fsize;
556 f = (const struct tp_finger *)(dev->tp_data + c->tp_offset);
557 raw_n = (size - c->tp_offset) / SIZEOF_FINGER;
558 630
559 for (i = 0; i < raw_n; i++) { 631 for (i = 0; i < raw_n; i++) {
560 if (raw2int(f[i].touch_major) == 0) 632 f = get_tp_finger(dev, i);
633 if (raw2int(f->touch_major) == 0)
561 continue; 634 continue;
562 dev->pos[n].x = raw2int(f[i].abs_x); 635 dev->pos[n].x = raw2int(f->abs_x);
563 dev->pos[n].y = c->y.min + c->y.max - raw2int(f[i].abs_y); 636 dev->pos[n].y = c->y.min + c->y.max - raw2int(f->abs_y);
564 dev->index[n++] = &f[i]; 637 dev->index[n++] = f;
565 } 638 }
566 639
567 input_mt_assign_slots(input, dev->slots, dev->pos, n, 0); 640 input_mt_assign_slots(input, dev->slots, dev->pos, n, 0);
@@ -572,32 +645,22 @@ static int report_tp_state(struct bcm5974 *dev, int size)
572 645
573 input_mt_sync_frame(input); 646 input_mt_sync_frame(input);
574 647
575 report_synaptics_data(input, c, f, raw_n); 648 report_synaptics_data(input, c, get_tp_finger(dev, 0), raw_n);
576 649
577 /* type 2 reports button events via ibt only */ 650 /* later types report button events via integrated button only */
578 if (c->tp_type == TYPE2) { 651 if (c->caps & HAS_INTEGRATED_BUTTON) {
579 int ibt = raw2int(dev->tp_data[BUTTON_TYPE2]); 652 int ibt = raw2int(dev->tp_data[c->tp_button]);
580 input_report_key(input, BTN_LEFT, ibt); 653 input_report_key(input, BTN_LEFT, ibt);
581 } 654 }
582 655
583 if (c->tp_type == TYPE3)
584 input_report_key(input, BTN_LEFT, dev->tp_data[BUTTON_TYPE3]);
585
586 input_sync(input); 656 input_sync(input);
587 657
588 return 0; 658 return 0;
589} 659}
590 660
591/* Wellspring initialization constants */
592#define BCM5974_WELLSPRING_MODE_READ_REQUEST_ID 1
593#define BCM5974_WELLSPRING_MODE_WRITE_REQUEST_ID 9
594#define BCM5974_WELLSPRING_MODE_REQUEST_VALUE 0x300
595#define BCM5974_WELLSPRING_MODE_REQUEST_INDEX 0
596#define BCM5974_WELLSPRING_MODE_VENDOR_VALUE 0x01
597#define BCM5974_WELLSPRING_MODE_NORMAL_VALUE 0x08
598
599static int bcm5974_wellspring_mode(struct bcm5974 *dev, bool on) 661static int bcm5974_wellspring_mode(struct bcm5974 *dev, bool on)
600{ 662{
663 const struct bcm5974_config *c = &dev->cfg;
601 int retval = 0, size; 664 int retval = 0, size;
602 char *data; 665 char *data;
603 666
@@ -605,7 +668,7 @@ static int bcm5974_wellspring_mode(struct bcm5974 *dev, bool on)
605 if (dev->cfg.tp_type == TYPE3) 668 if (dev->cfg.tp_type == TYPE3)
606 return 0; 669 return 0;
607 670
608 data = kmalloc(8, GFP_KERNEL); 671 data = kmalloc(c->um_size, GFP_KERNEL);
609 if (!data) { 672 if (!data) {
610 dev_err(&dev->intf->dev, "out of memory\n"); 673 dev_err(&dev->intf->dev, "out of memory\n");
611 retval = -ENOMEM; 674 retval = -ENOMEM;
@@ -616,28 +679,24 @@ static int bcm5974_wellspring_mode(struct bcm5974 *dev, bool on)
616 size = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), 679 size = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
617 BCM5974_WELLSPRING_MODE_READ_REQUEST_ID, 680 BCM5974_WELLSPRING_MODE_READ_REQUEST_ID,
618 USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE, 681 USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
619 BCM5974_WELLSPRING_MODE_REQUEST_VALUE, 682 c->um_req_val, c->um_req_idx, data, c->um_size, 5000);
620 BCM5974_WELLSPRING_MODE_REQUEST_INDEX, data, 8, 5000);
621 683
622 if (size != 8) { 684 if (size != c->um_size) {
623 dev_err(&dev->intf->dev, "could not read from device\n"); 685 dev_err(&dev->intf->dev, "could not read from device\n");
624 retval = -EIO; 686 retval = -EIO;
625 goto out; 687 goto out;
626 } 688 }
627 689
628 /* apply the mode switch */ 690 /* apply the mode switch */
629 data[0] = on ? 691 data[c->um_switch_idx] = on ? c->um_switch_on : c->um_switch_off;
630 BCM5974_WELLSPRING_MODE_VENDOR_VALUE :
631 BCM5974_WELLSPRING_MODE_NORMAL_VALUE;
632 692
633 /* write configuration */ 693 /* write configuration */
634 size = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), 694 size = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
635 BCM5974_WELLSPRING_MODE_WRITE_REQUEST_ID, 695 BCM5974_WELLSPRING_MODE_WRITE_REQUEST_ID,
636 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, 696 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
637 BCM5974_WELLSPRING_MODE_REQUEST_VALUE, 697 c->um_req_val, c->um_req_idx, data, c->um_size, 5000);
638 BCM5974_WELLSPRING_MODE_REQUEST_INDEX, data, 8, 5000);
639 698
640 if (size != 8) { 699 if (size != c->um_size) {
641 dev_err(&dev->intf->dev, "could not write to device\n"); 700 dev_err(&dev->intf->dev, "could not write to device\n");
642 retval = -EIO; 701 retval = -EIO;
643 goto out; 702 goto out;
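
With per-type header, delta and finger-block sizes, the finger records are no longer assumed to sit back to back right after the header; get_tp_finger() computes header + delta + i * fsize instead. A standalone sketch of that offset arithmetic, reusing the TYPE2 and TYPE4 constants defined in the patch:

#include <stdio.h>

#define LE16            2                      /* sizeof(__le16) */

/* Constants as defined in the patch above */
#define HEADER_TYPE2    (15 * LE16)
#define FSIZE_TYPE2     (14 * LE16)
#define DELTA_TYPE2     (0 * LE16)

#define HEADER_TYPE4    (23 * LE16)
#define FSIZE_TYPE4     (15 * LE16)
#define DELTA_TYPE4     (1 * LE16)

/* Byte offset of finger block i, mirroring get_tp_finger(). */
static int finger_offset(int header, int delta, int fsize, int i)
{
        return header + delta + i * fsize;
}

int main(void)
{
        int i;

        for (i = 0; i < 3; i++)
                printf("finger %d: TYPE2 at byte %d, TYPE4 at byte %d\n", i,
                       finger_offset(HEADER_TYPE2, DELTA_TYPE2, FSIZE_TYPE2, i),
                       finger_offset(HEADER_TYPE4, DELTA_TYPE4, FSIZE_TYPE4, i));
        return 0;
}
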
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index ce3d40004458..2955f1d0ca6c 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -783,19 +783,26 @@ static int elantech_packet_check_v4(struct psmouse *psmouse)
783 struct elantech_data *etd = psmouse->private; 783 struct elantech_data *etd = psmouse->private;
784 unsigned char *packet = psmouse->packet; 784 unsigned char *packet = psmouse->packet;
785 unsigned char packet_type = packet[3] & 0x03; 785 unsigned char packet_type = packet[3] & 0x03;
786 unsigned int ic_version;
786 bool sanity_check; 787 bool sanity_check;
787 788
788 if (etd->tp_dev && (packet[3] & 0x0f) == 0x06) 789 if (etd->tp_dev && (packet[3] & 0x0f) == 0x06)
789 return PACKET_TRACKPOINT; 790 return PACKET_TRACKPOINT;
790 791
792 /* This represents the version of IC body. */
793 ic_version = (etd->fw_version & 0x0f0000) >> 16;
794
791 /* 795 /*
792 * Sanity check based on the constant bits of a packet. 796 * Sanity check based on the constant bits of a packet.
793 * The constant bits change depending on the value of 797 * The constant bits change depending on the value of
794 * the hardware flag 'crc_enabled' but are the same for 798 * the hardware flag 'crc_enabled' and the version of
795 * every packet, regardless of the type. 799 * the IC body, but are the same for every packet,
800 * regardless of the type.
796 */ 801 */
797 if (etd->crc_enabled) 802 if (etd->crc_enabled)
798 sanity_check = ((packet[3] & 0x08) == 0x00); 803 sanity_check = ((packet[3] & 0x08) == 0x00);
804 else if (ic_version == 7 && etd->samples[1] == 0x2A)
805 sanity_check = ((packet[3] & 0x1c) == 0x10);
799 else 806 else
800 sanity_check = ((packet[0] & 0x0c) == 0x04 && 807 sanity_check = ((packet[0] & 0x0c) == 0x04 &&
801 (packet[3] & 0x1c) == 0x10); 808 (packet[3] & 0x1c) == 0x10);
@@ -1116,6 +1123,7 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
1116 * Avatar AVIU-145A2 0x361f00 ? clickpad 1123 * Avatar AVIU-145A2 0x361f00 ? clickpad
1117 * Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons 1124 * Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons
1118 * Fujitsu LIFEBOOK E554 0x570f01 40, 14, 0c 2 hw buttons 1125 * Fujitsu LIFEBOOK E554 0x570f01 40, 14, 0c 2 hw buttons
1126 * Fujitsu T725 0x470f01 05, 12, 09 2 hw buttons
1119 * Fujitsu H730 0x570f00 c0, 14, 0c 3 hw buttons (**) 1127 * Fujitsu H730 0x570f00 c0, 14, 0c 3 hw buttons (**)
1120 * Gigabyte U2442 0x450f01 58, 17, 0c 2 hw buttons 1128 * Gigabyte U2442 0x450f01 58, 17, 0c 2 hw buttons
1121 * Lenovo L430 0x350f02 b9, 15, 0c 2 hw buttons (*) 1129 * Lenovo L430 0x350f02 b9, 15, 0c 2 hw buttons (*)
@@ -1167,7 +1175,7 @@ static int elantech_set_input_params(struct psmouse *psmouse)
1167 struct input_dev *dev = psmouse->dev; 1175 struct input_dev *dev = psmouse->dev;
1168 struct elantech_data *etd = psmouse->private; 1176 struct elantech_data *etd = psmouse->private;
1169 unsigned int x_min = 0, y_min = 0, x_max = 0, y_max = 0, width = 0; 1177 unsigned int x_min = 0, y_min = 0, x_max = 0, y_max = 0, width = 0;
1170 unsigned int x_res = 0, y_res = 0; 1178 unsigned int x_res = 31, y_res = 31;
1171 1179
1172 if (elantech_set_range(psmouse, &x_min, &y_min, &x_max, &y_max, &width)) 1180 if (elantech_set_range(psmouse, &x_min, &y_min, &x_max, &y_max, &width))
1173 return -1; 1181 return -1;
@@ -1232,8 +1240,6 @@ static int elantech_set_input_params(struct psmouse *psmouse)
1232 /* For X to recognize me as touchpad. */ 1240 /* For X to recognize me as touchpad. */
1233 input_set_abs_params(dev, ABS_X, x_min, x_max, 0, 0); 1241 input_set_abs_params(dev, ABS_X, x_min, x_max, 0, 0);
1234 input_set_abs_params(dev, ABS_Y, y_min, y_max, 0, 0); 1242 input_set_abs_params(dev, ABS_Y, y_min, y_max, 0, 0);
1235 input_abs_set_res(dev, ABS_X, x_res);
1236 input_abs_set_res(dev, ABS_Y, y_res);
1237 /* 1243 /*
1238 * range of pressure and width is the same as v2, 1244 * range of pressure and width is the same as v2,
1239 * report ABS_PRESSURE, ABS_TOOL_WIDTH for compatibility. 1245 * report ABS_PRESSURE, ABS_TOOL_WIDTH for compatibility.
@@ -1246,8 +1252,6 @@ static int elantech_set_input_params(struct psmouse *psmouse)
1246 input_mt_init_slots(dev, ETP_MAX_FINGERS, 0); 1252 input_mt_init_slots(dev, ETP_MAX_FINGERS, 0);
1247 input_set_abs_params(dev, ABS_MT_POSITION_X, x_min, x_max, 0, 0); 1253 input_set_abs_params(dev, ABS_MT_POSITION_X, x_min, x_max, 0, 0);
1248 input_set_abs_params(dev, ABS_MT_POSITION_Y, y_min, y_max, 0, 0); 1254 input_set_abs_params(dev, ABS_MT_POSITION_Y, y_min, y_max, 0, 0);
1249 input_abs_set_res(dev, ABS_MT_POSITION_X, x_res);
1250 input_abs_set_res(dev, ABS_MT_POSITION_Y, y_res);
1251 input_set_abs_params(dev, ABS_MT_PRESSURE, ETP_PMIN_V2, 1255 input_set_abs_params(dev, ABS_MT_PRESSURE, ETP_PMIN_V2,
1252 ETP_PMAX_V2, 0, 0); 1256 ETP_PMAX_V2, 0, 0);
1253 /* 1257 /*
@@ -1259,6 +1263,13 @@ static int elantech_set_input_params(struct psmouse *psmouse)
1259 break; 1263 break;
1260 } 1264 }
1261 1265
1266 input_abs_set_res(dev, ABS_X, x_res);
1267 input_abs_set_res(dev, ABS_Y, y_res);
1268 if (etd->hw_version > 1) {
1269 input_abs_set_res(dev, ABS_MT_POSITION_X, x_res);
1270 input_abs_set_res(dev, ABS_MT_POSITION_Y, y_res);
1271 }
1272
1262 etd->y_max = y_max; 1273 etd->y_max = y_max;
1263 etd->width = width; 1274 etd->width = width;
1264 1275
@@ -1648,6 +1659,16 @@ int elantech_init(struct psmouse *psmouse)
1648 etd->capabilities[0], etd->capabilities[1], 1659 etd->capabilities[0], etd->capabilities[1],
1649 etd->capabilities[2]); 1660 etd->capabilities[2]);
1650 1661
1662 if (etd->hw_version != 1) {
1663 if (etd->send_cmd(psmouse, ETP_SAMPLE_QUERY, etd->samples)) {
1664 psmouse_err(psmouse, "failed to query sample data\n");
1665 goto init_fail;
1666 }
1667 psmouse_info(psmouse,
1668 "Elan sample query result %02x, %02x, %02x\n",
1669 etd->samples[0], etd->samples[1], etd->samples[2]);
1670 }
1671
1651 if (elantech_set_absolute_mode(psmouse)) { 1672 if (elantech_set_absolute_mode(psmouse)) {
1652 psmouse_err(psmouse, 1673 psmouse_err(psmouse,
1653 "failed to put touchpad into absolute mode.\n"); 1674 "failed to put touchpad into absolute mode.\n");
diff --git a/drivers/input/mouse/elantech.h b/drivers/input/mouse/elantech.h
index f965d1569cc3..e1cbf409d9c8 100644
--- a/drivers/input/mouse/elantech.h
+++ b/drivers/input/mouse/elantech.h
@@ -129,6 +129,7 @@ struct elantech_data {
 	unsigned char reg_26;
 	unsigned char debug;
 	unsigned char capabilities[3];
+	unsigned char samples[3];
 	bool paritycheck;
 	bool jumpy_cursor;
 	bool reports_pressure;
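
The new sanity check keys off the IC body version, which elantech_packet_check_v4() extracts from bits 16..19 of the firmware version. A quick sketch of that extraction, fed with firmware versions listed in the model comment above:

#include <stdio.h>

/* IC body version lives in bits 16..19 of fw_version, as in
 * elantech_packet_check_v4(). */
static unsigned int ic_version(unsigned int fw_version)
{
        return (fw_version & 0x0f0000) >> 16;
}

int main(void)
{
        /* Fujitsu T725 from the model table above */
        printf("fw 0x470f01 -> IC version %u\n", ic_version(0x470f01));
        /* Fujitsu LIFEBOOK E554 */
        printf("fw 0x570f01 -> IC version %u\n", ic_version(0x570f01));
        return 0;
}
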
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 3a32caf06bf1..6025eb430c0a 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -1484,12 +1484,12 @@ static int __synaptics_init(struct psmouse *psmouse, bool absolute_mode)
 	priv->pkt_type = SYN_MODEL_NEWABS(priv->model_id) ? SYN_NEWABS : SYN_OLDABS;
 
 	psmouse_info(psmouse,
-		     "Touchpad model: %ld, fw: %ld.%ld, id: %#lx, caps: %#lx/%#lx/%#lx, board id: %lu, fw id: %lu\n",
+		     "Touchpad model: %ld, fw: %ld.%ld, id: %#lx, caps: %#lx/%#lx/%#lx/%#lx, board id: %lu, fw id: %lu\n",
 		     SYN_ID_MODEL(priv->identity),
 		     SYN_ID_MAJOR(priv->identity), SYN_ID_MINOR(priv->identity),
 		     priv->model_id,
 		     priv->capabilities, priv->ext_cap, priv->ext_cap_0c,
-		     priv->board_id, priv->firmware_id);
+		     priv->ext_cap_10, priv->board_id, priv->firmware_id);
 
 	set_input_params(psmouse, priv);
 
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index b4d12e29abff..e36162b28c2a 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -15,6 +15,7 @@
15 */ 15 */
16 16
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/dmi.h>
18#include <linux/i2c.h> 19#include <linux/i2c.h>
19#include <linux/input.h> 20#include <linux/input.h>
20#include <linux/input/mt.h> 21#include <linux/input/mt.h>
@@ -34,6 +35,7 @@ struct goodix_ts_data {
34 int abs_y_max; 35 int abs_y_max;
35 unsigned int max_touch_num; 36 unsigned int max_touch_num;
36 unsigned int int_trigger_type; 37 unsigned int int_trigger_type;
38 bool rotated_screen;
37}; 39};
38 40
39#define GOODIX_MAX_HEIGHT 4096 41#define GOODIX_MAX_HEIGHT 4096
@@ -60,6 +62,30 @@ static const unsigned long goodix_irq_flags[] = {
60 IRQ_TYPE_LEVEL_HIGH, 62 IRQ_TYPE_LEVEL_HIGH,
61}; 63};
62 64
65/*
66 * Those tablets have their coordinates origin at the bottom right
67 * of the tablet, as if rotated 180 degrees
68 */
69static const struct dmi_system_id rotated_screen[] = {
70#if defined(CONFIG_DMI) && defined(CONFIG_X86)
71 {
72 .ident = "WinBook TW100",
73 .matches = {
74 DMI_MATCH(DMI_SYS_VENDOR, "WinBook"),
75 DMI_MATCH(DMI_PRODUCT_NAME, "TW100")
76 }
77 },
78 {
79 .ident = "WinBook TW700",
80 .matches = {
81 DMI_MATCH(DMI_SYS_VENDOR, "WinBook"),
82 DMI_MATCH(DMI_PRODUCT_NAME, "TW700")
83 },
84 },
85#endif
86 {}
87};
88
63/** 89/**
64 * goodix_i2c_read - read data from a register of the i2c slave device. 90 * goodix_i2c_read - read data from a register of the i2c slave device.
65 * 91 *
@@ -129,6 +155,11 @@ static void goodix_ts_report_touch(struct goodix_ts_data *ts, u8 *coor_data)
129 int input_y = get_unaligned_le16(&coor_data[3]); 155 int input_y = get_unaligned_le16(&coor_data[3]);
130 int input_w = get_unaligned_le16(&coor_data[5]); 156 int input_w = get_unaligned_le16(&coor_data[5]);
131 157
158 if (ts->rotated_screen) {
159 input_x = ts->abs_x_max - input_x;
160 input_y = ts->abs_y_max - input_y;
161 }
162
132 input_mt_slot(ts->input_dev, id); 163 input_mt_slot(ts->input_dev, id);
133 input_mt_report_slot_state(ts->input_dev, MT_TOOL_FINGER, true); 164 input_mt_report_slot_state(ts->input_dev, MT_TOOL_FINGER, true);
134 input_report_abs(ts->input_dev, ABS_MT_POSITION_X, input_x); 165 input_report_abs(ts->input_dev, ABS_MT_POSITION_X, input_x);
@@ -223,6 +254,11 @@ static void goodix_read_config(struct goodix_ts_data *ts)
223 ts->abs_y_max = GOODIX_MAX_HEIGHT; 254 ts->abs_y_max = GOODIX_MAX_HEIGHT;
224 ts->max_touch_num = GOODIX_MAX_CONTACTS; 255 ts->max_touch_num = GOODIX_MAX_CONTACTS;
225 } 256 }
257
258 ts->rotated_screen = dmi_check_system(rotated_screen);
259 if (ts->rotated_screen)
260 dev_dbg(&ts->client->dev,
261 "Applying '180 degrees rotated screen' quirk\n");
226} 262}
227 263
228/** 264/**
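
On the DMI-matched WinBook tablets the coordinate origin sits at the bottom-right corner, so goodix_ts_report_touch() mirrors both axes against the advertised maxima. A minimal sketch of that transform, with example maxima standing in for the values read from the config block:

#include <stdio.h>

/* Mirror a touch point for a panel mounted rotated by 180 degrees, as
 * goodix_ts_report_touch() now does when the DMI quirk matches. */
static void rotate_180(int *x, int *y, int abs_x_max, int abs_y_max)
{
        *x = abs_x_max - *x;
        *y = abs_y_max - *y;
}

int main(void)
{
        int x = 100, y = 200;

        rotate_180(&x, &y, 1024, 768);   /* example panel maxima */
        printf("rotated: (%d, %d)\n", x, y);
        return 0;
}
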
diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
index f2c6c352c55a..2c41107240de 100644
--- a/drivers/input/touchscreen/usbtouchscreen.c
+++ b/drivers/input/touchscreen/usbtouchscreen.c
@@ -627,6 +627,9 @@ static int dmc_tsc10_init(struct usbtouch_usb *usbtouch)
 		goto err_out;
 	}
 
+	/* TSC-25 data sheet specifies a delay after the RESET command */
+	msleep(150);
+
 	/* set coordinate output rate */
 	buf[0] = buf[1] = 0xFF;
 	ret = usb_control_msg(dev, usb_rcvctrlpipe (dev, 0),
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index a57e9b749895..658ee39e6569 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -76,8 +76,6 @@ LIST_HEAD(hpet_map);
76 * Domain for untranslated devices - only allocated 76 * Domain for untranslated devices - only allocated
77 * if iommu=pt passed on kernel cmd line. 77 * if iommu=pt passed on kernel cmd line.
78 */ 78 */
79static struct protection_domain *pt_domain;
80
81static const struct iommu_ops amd_iommu_ops; 79static const struct iommu_ops amd_iommu_ops;
82 80
83static ATOMIC_NOTIFIER_HEAD(ppr_notifier); 81static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
@@ -96,7 +94,7 @@ struct iommu_dev_data {
96 struct protection_domain *domain; /* Domain the device is bound to */ 94 struct protection_domain *domain; /* Domain the device is bound to */
97 u16 devid; /* PCI Device ID */ 95 u16 devid; /* PCI Device ID */
98 bool iommu_v2; /* Device can make use of IOMMUv2 */ 96 bool iommu_v2; /* Device can make use of IOMMUv2 */
99 bool passthrough; /* Default for device is pt_domain */ 97 bool passthrough; /* Device is identity mapped */
100 struct { 98 struct {
101 bool enabled; 99 bool enabled;
102 int qdep; 100 int qdep;
@@ -116,7 +114,6 @@ struct iommu_cmd {
116struct kmem_cache *amd_iommu_irq_cache; 114struct kmem_cache *amd_iommu_irq_cache;
117 115
118static void update_domain(struct protection_domain *domain); 116static void update_domain(struct protection_domain *domain);
119static int alloc_passthrough_domain(void);
120static int protection_domain_init(struct protection_domain *domain); 117static int protection_domain_init(struct protection_domain *domain);
121 118
122/**************************************************************************** 119/****************************************************************************
@@ -2167,15 +2164,17 @@ static int attach_device(struct device *dev,
2167 dev_data = get_dev_data(dev); 2164 dev_data = get_dev_data(dev);
2168 2165
2169 if (domain->flags & PD_IOMMUV2_MASK) { 2166 if (domain->flags & PD_IOMMUV2_MASK) {
2170 if (!dev_data->iommu_v2 || !dev_data->passthrough) 2167 if (!dev_data->passthrough)
2171 return -EINVAL; 2168 return -EINVAL;
2172 2169
2173 if (pdev_iommuv2_enable(pdev) != 0) 2170 if (dev_data->iommu_v2) {
2174 return -EINVAL; 2171 if (pdev_iommuv2_enable(pdev) != 0)
2172 return -EINVAL;
2175 2173
2176 dev_data->ats.enabled = true; 2174 dev_data->ats.enabled = true;
2177 dev_data->ats.qdep = pci_ats_queue_depth(pdev); 2175 dev_data->ats.qdep = pci_ats_queue_depth(pdev);
2178 dev_data->pri_tlp = pci_pri_tlp_required(pdev); 2176 dev_data->pri_tlp = pci_pri_tlp_required(pdev);
2177 }
2179 } else if (amd_iommu_iotlb_sup && 2178 } else if (amd_iommu_iotlb_sup &&
2180 pci_enable_ats(pdev, PAGE_SHIFT) == 0) { 2179 pci_enable_ats(pdev, PAGE_SHIFT) == 0) {
2181 dev_data->ats.enabled = true; 2180 dev_data->ats.enabled = true;
@@ -2221,15 +2220,6 @@ static void __detach_device(struct iommu_dev_data *dev_data)
2221 do_detach(head); 2220 do_detach(head);
2222 2221
2223 spin_unlock_irqrestore(&domain->lock, flags); 2222 spin_unlock_irqrestore(&domain->lock, flags);
2224
2225 /*
2226 * If we run in passthrough mode the device must be assigned to the
2227 * passthrough domain if it is detached from any other domain.
2228 * Make sure we can deassign from the pt_domain itself.
2229 */
2230 if (dev_data->passthrough &&
2231 (dev_data->domain == NULL && domain != pt_domain))
2232 __attach_device(dev_data, pt_domain);
2233} 2223}
2234 2224
2235/* 2225/*
@@ -2249,7 +2239,7 @@ static void detach_device(struct device *dev)
2249 __detach_device(dev_data); 2239 __detach_device(dev_data);
2250 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); 2240 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
2251 2241
2252 if (domain->flags & PD_IOMMUV2_MASK) 2242 if (domain->flags & PD_IOMMUV2_MASK && dev_data->iommu_v2)
2253 pdev_iommuv2_disable(to_pci_dev(dev)); 2243 pdev_iommuv2_disable(to_pci_dev(dev));
2254 else if (dev_data->ats.enabled) 2244 else if (dev_data->ats.enabled)
2255 pci_disable_ats(to_pci_dev(dev)); 2245 pci_disable_ats(to_pci_dev(dev));
@@ -2287,17 +2277,15 @@ static int amd_iommu_add_device(struct device *dev)
2287 2277
2288 BUG_ON(!dev_data); 2278 BUG_ON(!dev_data);
2289 2279
2290 if (dev_data->iommu_v2) 2280 if (iommu_pass_through || dev_data->iommu_v2)
2291 iommu_request_dm_for_dev(dev); 2281 iommu_request_dm_for_dev(dev);
2292 2282
2293 /* Domains are initialized for this device - have a look what we ended up with */ 2283 /* Domains are initialized for this device - have a look what we ended up with */
2294 domain = iommu_get_domain_for_dev(dev); 2284 domain = iommu_get_domain_for_dev(dev);
2295 if (domain->type == IOMMU_DOMAIN_IDENTITY) { 2285 if (domain->type == IOMMU_DOMAIN_IDENTITY)
2296 dev_data->passthrough = true; 2286 dev_data->passthrough = true;
2297 dev->archdata.dma_ops = &nommu_dma_ops; 2287 else
2298 } else {
2299 dev->archdata.dma_ops = &amd_iommu_dma_ops; 2288 dev->archdata.dma_ops = &amd_iommu_dma_ops;
2300 }
2301 2289
2302out: 2290out:
2303 iommu_completion_wait(iommu); 2291 iommu_completion_wait(iommu);
@@ -2862,8 +2850,17 @@ int __init amd_iommu_init_api(void)
2862 2850
2863int __init amd_iommu_init_dma_ops(void) 2851int __init amd_iommu_init_dma_ops(void)
2864{ 2852{
2853 swiotlb = iommu_pass_through ? 1 : 0;
2865 iommu_detected = 1; 2854 iommu_detected = 1;
2866 swiotlb = 0; 2855
2856 /*
2857 * In case we don't initialize SWIOTLB (actually the common case
2858 * when AMD IOMMU is enabled), make sure there are global
2859 * dma_ops set as a fall-back for devices not handled by this
2860 * driver (for example non-PCI devices).
2861 */
2862 if (!swiotlb)
2863 dma_ops = &nommu_dma_ops;
2867 2864
2868 amd_iommu_stats_init(); 2865 amd_iommu_stats_init();
2869 2866
@@ -2947,21 +2944,6 @@ out_err:
2947 return NULL; 2944 return NULL;
2948} 2945}
2949 2946
2950static int alloc_passthrough_domain(void)
2951{
2952 if (pt_domain != NULL)
2953 return 0;
2954
2955 /* allocate passthrough domain */
2956 pt_domain = protection_domain_alloc();
2957 if (!pt_domain)
2958 return -ENOMEM;
2959
2960 pt_domain->mode = PAGE_MODE_NONE;
2961
2962 return 0;
2963}
2964
2965static struct iommu_domain *amd_iommu_domain_alloc(unsigned type) 2947static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
2966{ 2948{
2967 struct protection_domain *pdomain; 2949 struct protection_domain *pdomain;
@@ -3222,33 +3204,6 @@ static const struct iommu_ops amd_iommu_ops = {
3222 * 3204 *
3223 *****************************************************************************/ 3205 *****************************************************************************/
3224 3206
3225int __init amd_iommu_init_passthrough(void)
3226{
3227 struct iommu_dev_data *dev_data;
3228 struct pci_dev *dev = NULL;
3229 int ret;
3230
3231 ret = alloc_passthrough_domain();
3232 if (ret)
3233 return ret;
3234
3235 for_each_pci_dev(dev) {
3236 if (!check_device(&dev->dev))
3237 continue;
3238
3239 dev_data = get_dev_data(&dev->dev);
3240 dev_data->passthrough = true;
3241
3242 attach_device(&dev->dev, pt_domain);
3243 }
3244
3245 amd_iommu_stats_init();
3246
3247 pr_info("AMD-Vi: Initialized for Passthrough Mode\n");
3248
3249 return 0;
3250}
3251
3252/* IOMMUv2 specific functions */ 3207/* IOMMUv2 specific functions */
3253int amd_iommu_register_ppr_notifier(struct notifier_block *nb) 3208int amd_iommu_register_ppr_notifier(struct notifier_block *nb)
3254{ 3209{
@@ -3363,7 +3318,12 @@ static int __flush_pasid(struct protection_domain *domain, int pasid,
3363 struct amd_iommu *iommu; 3318 struct amd_iommu *iommu;
3364 int qdep; 3319 int qdep;
3365 3320
3366 BUG_ON(!dev_data->ats.enabled); 3321 /*
3322	 * There might be non-IOMMUv2 capable devices in an IOMMUv2
3323 * domain.
3324 */
3325 if (!dev_data->ats.enabled)
3326 continue;
3367 3327
3368 qdep = dev_data->ats.qdep; 3328 qdep = dev_data->ats.qdep;
3369 iommu = amd_iommu_rlookup_table[dev_data->devid]; 3329 iommu = amd_iommu_rlookup_table[dev_data->devid];
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index dbda9ae68c5d..a24495eb4e26 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -2026,14 +2026,6 @@ static bool detect_ivrs(void)
2026 return true; 2026 return true;
2027} 2027}
2028 2028
2029static int amd_iommu_init_dma(void)
2030{
2031 if (iommu_pass_through)
2032 return amd_iommu_init_passthrough();
2033 else
2034 return amd_iommu_init_dma_ops();
2035}
2036
2037/**************************************************************************** 2029/****************************************************************************
2038 * 2030 *
2039 * AMD IOMMU Initialization State Machine 2031 * AMD IOMMU Initialization State Machine
@@ -2073,7 +2065,7 @@ static int __init state_next(void)
2073 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN; 2065 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN;
2074 break; 2066 break;
2075 case IOMMU_INTERRUPTS_EN: 2067 case IOMMU_INTERRUPTS_EN:
2076 ret = amd_iommu_init_dma(); 2068 ret = amd_iommu_init_dma_ops();
2077 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_DMA_OPS; 2069 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_DMA_OPS;
2078 break; 2070 break;
2079 case IOMMU_DMA_OPS: 2071 case IOMMU_DMA_OPS:
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index 3465faf1809e..f7b875bb70d4 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -132,11 +132,19 @@ static struct device_state *get_device_state(u16 devid)
132 132
133static void free_device_state(struct device_state *dev_state) 133static void free_device_state(struct device_state *dev_state)
134{ 134{
135 struct iommu_group *group;
136
135 /* 137 /*
136 * First detach device from domain - No more PRI requests will arrive 138 * First detach device from domain - No more PRI requests will arrive
137 * from that device after it is unbound from the IOMMUv2 domain. 139 * from that device after it is unbound from the IOMMUv2 domain.
138 */ 140 */
139 iommu_detach_device(dev_state->domain, &dev_state->pdev->dev); 141 group = iommu_group_get(&dev_state->pdev->dev);
142 if (WARN_ON(!group))
143 return;
144
145 iommu_detach_group(dev_state->domain, group);
146
147 iommu_group_put(group);
140 148
141 /* Everything is down now, free the IOMMUv2 domain */ 149 /* Everything is down now, free the IOMMUv2 domain */
142 iommu_domain_free(dev_state->domain); 150 iommu_domain_free(dev_state->domain);
@@ -731,6 +739,7 @@ EXPORT_SYMBOL(amd_iommu_unbind_pasid);
731int amd_iommu_init_device(struct pci_dev *pdev, int pasids) 739int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
732{ 740{
733 struct device_state *dev_state; 741 struct device_state *dev_state;
742 struct iommu_group *group;
734 unsigned long flags; 743 unsigned long flags;
735 int ret, tmp; 744 int ret, tmp;
736 u16 devid; 745 u16 devid;
@@ -776,10 +785,16 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
776 if (ret) 785 if (ret)
777 goto out_free_domain; 786 goto out_free_domain;
778 787
779 ret = iommu_attach_device(dev_state->domain, &pdev->dev); 788 group = iommu_group_get(&pdev->dev);
780 if (ret != 0) 789 if (!group)
781 goto out_free_domain; 790 goto out_free_domain;
782 791
792 ret = iommu_attach_group(dev_state->domain, group);
793 if (ret != 0)
794 goto out_drop_group;
795
796 iommu_group_put(group);
797
783 spin_lock_irqsave(&state_lock, flags); 798 spin_lock_irqsave(&state_lock, flags);
784 799
785 if (__get_device_state(devid) != NULL) { 800 if (__get_device_state(devid) != NULL) {
@@ -794,6 +809,9 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
794 809
795 return 0; 810 return 0;
796 811
812out_drop_group:
813 iommu_group_put(group);
814
797out_free_domain: 815out_free_domain:
798 iommu_domain_free(dev_state->domain); 816 iommu_domain_free(dev_state->domain);
799 817
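Note on the amd_iommu_v2.c hunks above: the IOMMUv2 domain is now attached through the device's IOMMU group, taking a group reference first and dropping it on both the success and the error path. A standalone model of that reference/attach/unwind pattern (illustrative only; the structs and helpers are made up, not the kernel API):

#include <stdio.h>

struct group { int refcount; };

static struct group *group_get(struct group *g) { g->refcount++; return g; }
static void group_put(struct group *g) { g->refcount--; }

/* Stand-in for the attach call; 'fail' simulates an error. */
static int attach_group(struct group *g, int fail)
{
	(void)g;
	return fail ? -1 : 0;
}

static int init_device(struct group *g, int fail_attach)
{
	struct group *ref = group_get(g);
	int ret = attach_group(ref, fail_attach);

	group_put(ref);               /* dropped on success and on failure */
	return ret;
}

int main(void)
{
	struct group g = { .refcount = 1 };

	printf("attach ok : %d, refcount %d\n", init_device(&g, 0), g.refcount);
	printf("attach err: %d, refcount %d\n", init_device(&g, 1), g.refcount);
	return 0;
}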
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 8e9ec81ce4bb..da902baaa794 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -199,9 +199,10 @@
199 * Stream table. 199 * Stream table.
200 * 200 *
201 * Linear: Enough to cover 1 << IDR1.SIDSIZE entries 201 * Linear: Enough to cover 1 << IDR1.SIDSIZE entries
202 * 2lvl: 8k L1 entries, 256 lazy entries per table (each table covers a PCI bus) 202 * 2lvl: 128k L1 entries,
203 * 256 lazy entries per table (each table covers a PCI bus)
203 */ 204 */
204#define STRTAB_L1_SZ_SHIFT 16 205#define STRTAB_L1_SZ_SHIFT 20
205#define STRTAB_SPLIT 8 206#define STRTAB_SPLIT 8
206 207
207#define STRTAB_L1_DESC_DWORDS 1 208#define STRTAB_L1_DESC_DWORDS 1
@@ -269,10 +270,10 @@
269#define ARM64_TCR_TG0_SHIFT 14 270#define ARM64_TCR_TG0_SHIFT 14
270#define ARM64_TCR_TG0_MASK 0x3UL 271#define ARM64_TCR_TG0_MASK 0x3UL
271#define CTXDESC_CD_0_TCR_IRGN0_SHIFT 8 272#define CTXDESC_CD_0_TCR_IRGN0_SHIFT 8
272#define ARM64_TCR_IRGN0_SHIFT 24 273#define ARM64_TCR_IRGN0_SHIFT 8
273#define ARM64_TCR_IRGN0_MASK 0x3UL 274#define ARM64_TCR_IRGN0_MASK 0x3UL
274#define CTXDESC_CD_0_TCR_ORGN0_SHIFT 10 275#define CTXDESC_CD_0_TCR_ORGN0_SHIFT 10
275#define ARM64_TCR_ORGN0_SHIFT 26 276#define ARM64_TCR_ORGN0_SHIFT 10
276#define ARM64_TCR_ORGN0_MASK 0x3UL 277#define ARM64_TCR_ORGN0_MASK 0x3UL
277#define CTXDESC_CD_0_TCR_SH0_SHIFT 12 278#define CTXDESC_CD_0_TCR_SH0_SHIFT 12
278#define ARM64_TCR_SH0_SHIFT 12 279#define ARM64_TCR_SH0_SHIFT 12
@@ -542,6 +543,9 @@ struct arm_smmu_device {
542#define ARM_SMMU_FEAT_HYP (1 << 12) 543#define ARM_SMMU_FEAT_HYP (1 << 12)
543 u32 features; 544 u32 features;
544 545
546#define ARM_SMMU_OPT_SKIP_PREFETCH (1 << 0)
547 u32 options;
548
545 struct arm_smmu_cmdq cmdq; 549 struct arm_smmu_cmdq cmdq;
546 struct arm_smmu_evtq evtq; 550 struct arm_smmu_evtq evtq;
547 struct arm_smmu_priq priq; 551 struct arm_smmu_priq priq;
@@ -602,11 +606,35 @@ struct arm_smmu_domain {
602static DEFINE_SPINLOCK(arm_smmu_devices_lock); 606static DEFINE_SPINLOCK(arm_smmu_devices_lock);
603static LIST_HEAD(arm_smmu_devices); 607static LIST_HEAD(arm_smmu_devices);
604 608
609struct arm_smmu_option_prop {
610 u32 opt;
611 const char *prop;
612};
613
614static struct arm_smmu_option_prop arm_smmu_options[] = {
615 { ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" },
616 { 0, NULL},
617};
618
605static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom) 619static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
606{ 620{
607 return container_of(dom, struct arm_smmu_domain, domain); 621 return container_of(dom, struct arm_smmu_domain, domain);
608} 622}
609 623
624static void parse_driver_options(struct arm_smmu_device *smmu)
625{
626 int i = 0;
627
628 do {
629 if (of_property_read_bool(smmu->dev->of_node,
630 arm_smmu_options[i].prop)) {
631 smmu->options |= arm_smmu_options[i].opt;
632 dev_notice(smmu->dev, "option %s\n",
633 arm_smmu_options[i].prop);
634 }
635 } while (arm_smmu_options[++i].opt);
636}
637
610/* Low-level queue manipulation functions */ 638/* Low-level queue manipulation functions */
611static bool queue_full(struct arm_smmu_queue *q) 639static bool queue_full(struct arm_smmu_queue *q)
612{ 640{
@@ -1036,7 +1064,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
1036 arm_smmu_sync_ste_for_sid(smmu, sid); 1064 arm_smmu_sync_ste_for_sid(smmu, sid);
1037 1065
1038 /* It's likely that we'll want to use the new STE soon */ 1066 /* It's likely that we'll want to use the new STE soon */
1039 arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd); 1067 if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH))
1068 arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
1040} 1069}
1041 1070
1042static void arm_smmu_init_bypass_stes(u64 *strtab, unsigned int nent) 1071static void arm_smmu_init_bypass_stes(u64 *strtab, unsigned int nent)
@@ -1064,7 +1093,7 @@ static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
1064 return 0; 1093 return 0;
1065 1094
1066 size = 1 << (STRTAB_SPLIT + ilog2(STRTAB_STE_DWORDS) + 3); 1095 size = 1 << (STRTAB_SPLIT + ilog2(STRTAB_STE_DWORDS) + 3);
1067 strtab = &cfg->strtab[sid >> STRTAB_SPLIT << STRTAB_L1_DESC_DWORDS]; 1096 strtab = &cfg->strtab[(sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS];
1068 1097
1069 desc->span = STRTAB_SPLIT + 1; 1098 desc->span = STRTAB_SPLIT + 1;
1070 desc->l2ptr = dma_zalloc_coherent(smmu->dev, size, &desc->l2ptr_dma, 1099 desc->l2ptr = dma_zalloc_coherent(smmu->dev, size, &desc->l2ptr_dma,
@@ -2020,21 +2049,23 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
2020{ 2049{
2021 void *strtab; 2050 void *strtab;
2022 u64 reg; 2051 u64 reg;
2023 u32 size; 2052 u32 size, l1size;
2024 int ret; 2053 int ret;
2025 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg; 2054 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
2026 2055
2027 /* Calculate the L1 size, capped to the SIDSIZE */ 2056 /* Calculate the L1 size, capped to the SIDSIZE */
2028 size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3); 2057 size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
2029 size = min(size, smmu->sid_bits - STRTAB_SPLIT); 2058 size = min(size, smmu->sid_bits - STRTAB_SPLIT);
2030 if (size + STRTAB_SPLIT < smmu->sid_bits) 2059 cfg->num_l1_ents = 1 << size;
2060
2061 size += STRTAB_SPLIT;
2062 if (size < smmu->sid_bits)
2031 dev_warn(smmu->dev, 2063 dev_warn(smmu->dev,
2032 "2-level strtab only covers %u/%u bits of SID\n", 2064 "2-level strtab only covers %u/%u bits of SID\n",
2033 size + STRTAB_SPLIT, smmu->sid_bits); 2065 size, smmu->sid_bits);
2034 2066
2035 cfg->num_l1_ents = 1 << size; 2067 l1size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3);
2036 size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3); 2068 strtab = dma_zalloc_coherent(smmu->dev, l1size, &cfg->strtab_dma,
2037 strtab = dma_zalloc_coherent(smmu->dev, size, &cfg->strtab_dma,
2038 GFP_KERNEL); 2069 GFP_KERNEL);
2039 if (!strtab) { 2070 if (!strtab) {
2040 dev_err(smmu->dev, 2071 dev_err(smmu->dev,
@@ -2055,8 +2086,7 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
2055 ret = arm_smmu_init_l1_strtab(smmu); 2086 ret = arm_smmu_init_l1_strtab(smmu);
2056 if (ret) 2087 if (ret)
2057 dma_free_coherent(smmu->dev, 2088 dma_free_coherent(smmu->dev,
2058 cfg->num_l1_ents * 2089 l1size,
2059 (STRTAB_L1_DESC_DWORDS << 3),
2060 strtab, 2090 strtab,
2061 cfg->strtab_dma); 2091 cfg->strtab_dma);
2062 return ret; 2092 return ret;
@@ -2573,6 +2603,8 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
2573 if (irq > 0) 2603 if (irq > 0)
2574 smmu->gerr_irq = irq; 2604 smmu->gerr_irq = irq;
2575 2605
2606 parse_driver_options(smmu);
2607
2576 /* Probe the h/w */ 2608 /* Probe the h/w */
2577 ret = arm_smmu_device_probe(smmu); 2609 ret = arm_smmu_device_probe(smmu);
2578 if (ret) 2610 if (ret)
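Note on the arm-smmu-v3.c hunks above: firmware workarounds are collected by scanning a sentinel-terminated option table against device-tree properties and ORing the matching flags into smmu->options. A standalone sketch of that table-scan pattern (illustrative only; the device-tree lookup is stubbed out):

#include <stdio.h>
#include <string.h>

#define OPT_SKIP_PREFETCH (1u << 0)

struct option_prop {
	unsigned int opt;
	const char *prop;
};

/* Sentinel-terminated table, scanned the same way as arm_smmu_options[]. */
static const struct option_prop options[] = {
	{ OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" },
	{ 0, NULL },
};

/* Stand-in for of_property_read_bool(): pretend one property is present. */
static int dt_has_property(const char *prop)
{
	return strcmp(prop, "hisilicon,broken-prefetch-cmd") == 0;
}

static unsigned int parse_options(void)
{
	unsigned int flags = 0;
	int i = 0;

	do {
		if (dt_has_property(options[i].prop))
			flags |= options[i].opt;
	} while (options[++i].opt);

	return flags;
}

int main(void)
{
	printf("options: %#x\n", parse_options());
	return 0;
}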
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index a98a7b27aca1..0649b94f5958 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1830,8 +1830,9 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
1830 1830
1831static void domain_exit(struct dmar_domain *domain) 1831static void domain_exit(struct dmar_domain *domain)
1832{ 1832{
1833 struct dmar_drhd_unit *drhd;
1834 struct intel_iommu *iommu;
1833 struct page *freelist = NULL; 1835 struct page *freelist = NULL;
1834 int i;
1835 1836
1836 /* Domain 0 is reserved, so dont process it */ 1837 /* Domain 0 is reserved, so dont process it */
1837 if (!domain) 1838 if (!domain)
@@ -1851,8 +1852,10 @@ static void domain_exit(struct dmar_domain *domain)
1851 1852
1852 /* clear attached or cached domains */ 1853 /* clear attached or cached domains */
1853 rcu_read_lock(); 1854 rcu_read_lock();
1854 for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) 1855 for_each_active_iommu(iommu, drhd)
1855 iommu_detach_domain(domain, g_iommus[i]); 1856 if (domain_type_is_vm(domain) ||
1857 test_bit(iommu->seq_id, domain->iommu_bmp))
1858 iommu_detach_domain(domain, iommu);
1856 rcu_read_unlock(); 1859 rcu_read_unlock();
1857 1860
1858 dma_free_pagelist(freelist); 1861 dma_free_pagelist(freelist);
diff --git a/drivers/irqchip/irq-crossbar.c b/drivers/irqchip/irq-crossbar.c
index 692fe2bc8197..c12bb93334ff 100644
--- a/drivers/irqchip/irq-crossbar.c
+++ b/drivers/irqchip/irq-crossbar.c
@@ -68,7 +68,9 @@ static struct irq_chip crossbar_chip = {
68 .irq_mask = irq_chip_mask_parent, 68 .irq_mask = irq_chip_mask_parent,
69 .irq_unmask = irq_chip_unmask_parent, 69 .irq_unmask = irq_chip_unmask_parent,
70 .irq_retrigger = irq_chip_retrigger_hierarchy, 70 .irq_retrigger = irq_chip_retrigger_hierarchy,
71 .irq_set_wake = irq_chip_set_wake_parent, 71 .irq_set_type = irq_chip_set_type_parent,
72 .flags = IRQCHIP_MASK_ON_SUSPEND |
73 IRQCHIP_SKIP_SET_WAKE,
72#ifdef CONFIG_SMP 74#ifdef CONFIG_SMP
73 .irq_set_affinity = irq_chip_set_affinity_parent, 75 .irq_set_affinity = irq_chip_set_affinity_parent,
74#endif 76#endif
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 1b7e155869f6..c00e2db351ba 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -75,6 +75,13 @@ struct its_node {
75 75
76#define ITS_ITT_ALIGN SZ_256 76#define ITS_ITT_ALIGN SZ_256
77 77
78struct event_lpi_map {
79 unsigned long *lpi_map;
80 u16 *col_map;
81 irq_hw_number_t lpi_base;
82 int nr_lpis;
83};
84
78/* 85/*
79 * The ITS view of a device - belongs to an ITS, a collection, owns an 86 * The ITS view of a device - belongs to an ITS, a collection, owns an
80 * interrupt translation table, and a list of interrupts. 87 * interrupt translation table, and a list of interrupts.
@@ -82,11 +89,8 @@ struct its_node {
82struct its_device { 89struct its_device {
83 struct list_head entry; 90 struct list_head entry;
84 struct its_node *its; 91 struct its_node *its;
85 struct its_collection *collection; 92 struct event_lpi_map event_map;
86 void *itt; 93 void *itt;
87 unsigned long *lpi_map;
88 irq_hw_number_t lpi_base;
89 int nr_lpis;
90 u32 nr_ites; 94 u32 nr_ites;
91 u32 device_id; 95 u32 device_id;
92}; 96};
@@ -99,6 +103,14 @@ static struct rdists *gic_rdists;
99#define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist)) 103#define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist))
100#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base) 104#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
101 105
106static struct its_collection *dev_event_to_col(struct its_device *its_dev,
107 u32 event)
108{
109 struct its_node *its = its_dev->its;
110
111 return its->collections + its_dev->event_map.col_map[event];
112}
113
102/* 114/*
103 * ITS command descriptors - parameters to be encoded in a command 115 * ITS command descriptors - parameters to be encoded in a command
104 * block. 116 * block.
@@ -134,7 +146,7 @@ struct its_cmd_desc {
134 struct { 146 struct {
135 struct its_device *dev; 147 struct its_device *dev;
136 struct its_collection *col; 148 struct its_collection *col;
137 u32 id; 149 u32 event_id;
138 } its_movi_cmd; 150 } its_movi_cmd;
139 151
140 struct { 152 struct {
@@ -241,7 +253,7 @@ static struct its_collection *its_build_mapd_cmd(struct its_cmd_block *cmd,
241 253
242 its_fixup_cmd(cmd); 254 its_fixup_cmd(cmd);
243 255
244 return desc->its_mapd_cmd.dev->collection; 256 return NULL;
245} 257}
246 258
247static struct its_collection *its_build_mapc_cmd(struct its_cmd_block *cmd, 259static struct its_collection *its_build_mapc_cmd(struct its_cmd_block *cmd,
@@ -260,52 +272,72 @@ static struct its_collection *its_build_mapc_cmd(struct its_cmd_block *cmd,
260static struct its_collection *its_build_mapvi_cmd(struct its_cmd_block *cmd, 272static struct its_collection *its_build_mapvi_cmd(struct its_cmd_block *cmd,
261 struct its_cmd_desc *desc) 273 struct its_cmd_desc *desc)
262{ 274{
275 struct its_collection *col;
276
277 col = dev_event_to_col(desc->its_mapvi_cmd.dev,
278 desc->its_mapvi_cmd.event_id);
279
263 its_encode_cmd(cmd, GITS_CMD_MAPVI); 280 its_encode_cmd(cmd, GITS_CMD_MAPVI);
264 its_encode_devid(cmd, desc->its_mapvi_cmd.dev->device_id); 281 its_encode_devid(cmd, desc->its_mapvi_cmd.dev->device_id);
265 its_encode_event_id(cmd, desc->its_mapvi_cmd.event_id); 282 its_encode_event_id(cmd, desc->its_mapvi_cmd.event_id);
266 its_encode_phys_id(cmd, desc->its_mapvi_cmd.phys_id); 283 its_encode_phys_id(cmd, desc->its_mapvi_cmd.phys_id);
267 its_encode_collection(cmd, desc->its_mapvi_cmd.dev->collection->col_id); 284 its_encode_collection(cmd, col->col_id);
268 285
269 its_fixup_cmd(cmd); 286 its_fixup_cmd(cmd);
270 287
271 return desc->its_mapvi_cmd.dev->collection; 288 return col;
272} 289}
273 290
274static struct its_collection *its_build_movi_cmd(struct its_cmd_block *cmd, 291static struct its_collection *its_build_movi_cmd(struct its_cmd_block *cmd,
275 struct its_cmd_desc *desc) 292 struct its_cmd_desc *desc)
276{ 293{
294 struct its_collection *col;
295
296 col = dev_event_to_col(desc->its_movi_cmd.dev,
297 desc->its_movi_cmd.event_id);
298
277 its_encode_cmd(cmd, GITS_CMD_MOVI); 299 its_encode_cmd(cmd, GITS_CMD_MOVI);
278 its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id); 300 its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id);
279 its_encode_event_id(cmd, desc->its_movi_cmd.id); 301 its_encode_event_id(cmd, desc->its_movi_cmd.event_id);
280 its_encode_collection(cmd, desc->its_movi_cmd.col->col_id); 302 its_encode_collection(cmd, desc->its_movi_cmd.col->col_id);
281 303
282 its_fixup_cmd(cmd); 304 its_fixup_cmd(cmd);
283 305
284 return desc->its_movi_cmd.dev->collection; 306 return col;
285} 307}
286 308
287static struct its_collection *its_build_discard_cmd(struct its_cmd_block *cmd, 309static struct its_collection *its_build_discard_cmd(struct its_cmd_block *cmd,
288 struct its_cmd_desc *desc) 310 struct its_cmd_desc *desc)
289{ 311{
312 struct its_collection *col;
313
314 col = dev_event_to_col(desc->its_discard_cmd.dev,
315 desc->its_discard_cmd.event_id);
316
290 its_encode_cmd(cmd, GITS_CMD_DISCARD); 317 its_encode_cmd(cmd, GITS_CMD_DISCARD);
291 its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id); 318 its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id);
292 its_encode_event_id(cmd, desc->its_discard_cmd.event_id); 319 its_encode_event_id(cmd, desc->its_discard_cmd.event_id);
293 320
294 its_fixup_cmd(cmd); 321 its_fixup_cmd(cmd);
295 322
296 return desc->its_discard_cmd.dev->collection; 323 return col;
297} 324}
298 325
299static struct its_collection *its_build_inv_cmd(struct its_cmd_block *cmd, 326static struct its_collection *its_build_inv_cmd(struct its_cmd_block *cmd,
300 struct its_cmd_desc *desc) 327 struct its_cmd_desc *desc)
301{ 328{
329 struct its_collection *col;
330
331 col = dev_event_to_col(desc->its_inv_cmd.dev,
332 desc->its_inv_cmd.event_id);
333
302 its_encode_cmd(cmd, GITS_CMD_INV); 334 its_encode_cmd(cmd, GITS_CMD_INV);
303 its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id); 335 its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
304 its_encode_event_id(cmd, desc->its_inv_cmd.event_id); 336 its_encode_event_id(cmd, desc->its_inv_cmd.event_id);
305 337
306 its_fixup_cmd(cmd); 338 its_fixup_cmd(cmd);
307 339
308 return desc->its_inv_cmd.dev->collection; 340 return col;
309} 341}
310 342
311static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd, 343static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd,
@@ -497,7 +529,7 @@ static void its_send_movi(struct its_device *dev,
497 529
498 desc.its_movi_cmd.dev = dev; 530 desc.its_movi_cmd.dev = dev;
499 desc.its_movi_cmd.col = col; 531 desc.its_movi_cmd.col = col;
500 desc.its_movi_cmd.id = id; 532 desc.its_movi_cmd.event_id = id;
501 533
502 its_send_single_command(dev->its, its_build_movi_cmd, &desc); 534 its_send_single_command(dev->its, its_build_movi_cmd, &desc);
503} 535}
@@ -528,7 +560,7 @@ static void its_send_invall(struct its_node *its, struct its_collection *col)
528static inline u32 its_get_event_id(struct irq_data *d) 560static inline u32 its_get_event_id(struct irq_data *d)
529{ 561{
530 struct its_device *its_dev = irq_data_get_irq_chip_data(d); 562 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
531 return d->hwirq - its_dev->lpi_base; 563 return d->hwirq - its_dev->event_map.lpi_base;
532} 564}
533 565
534static void lpi_set_config(struct irq_data *d, bool enable) 566static void lpi_set_config(struct irq_data *d, bool enable)
@@ -583,7 +615,7 @@ static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
583 615
584 target_col = &its_dev->its->collections[cpu]; 616 target_col = &its_dev->its->collections[cpu];
585 its_send_movi(its_dev, target_col, id); 617 its_send_movi(its_dev, target_col, id);
586 its_dev->collection = target_col; 618 its_dev->event_map.col_map[id] = cpu;
587 619
588 return IRQ_SET_MASK_OK_DONE; 620 return IRQ_SET_MASK_OK_DONE;
589} 621}
@@ -713,8 +745,10 @@ out:
713 return bitmap; 745 return bitmap;
714} 746}
715 747
716static void its_lpi_free(unsigned long *bitmap, int base, int nr_ids) 748static void its_lpi_free(struct event_lpi_map *map)
717{ 749{
750 int base = map->lpi_base;
751 int nr_ids = map->nr_lpis;
718 int lpi; 752 int lpi;
719 753
720 spin_lock(&lpi_lock); 754 spin_lock(&lpi_lock);
@@ -731,7 +765,8 @@ static void its_lpi_free(unsigned long *bitmap, int base, int nr_ids)
731 765
732 spin_unlock(&lpi_lock); 766 spin_unlock(&lpi_lock);
733 767
734 kfree(bitmap); 768 kfree(map->lpi_map);
769 kfree(map->col_map);
735} 770}
736 771
737/* 772/*
@@ -1099,11 +1134,11 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
1099 struct its_device *dev; 1134 struct its_device *dev;
1100 unsigned long *lpi_map; 1135 unsigned long *lpi_map;
1101 unsigned long flags; 1136 unsigned long flags;
1137 u16 *col_map = NULL;
1102 void *itt; 1138 void *itt;
1103 int lpi_base; 1139 int lpi_base;
1104 int nr_lpis; 1140 int nr_lpis;
1105 int nr_ites; 1141 int nr_ites;
1106 int cpu;
1107 int sz; 1142 int sz;
1108 1143
1109 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 1144 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
@@ -1117,20 +1152,24 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
1117 sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; 1152 sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
1118 itt = kzalloc(sz, GFP_KERNEL); 1153 itt = kzalloc(sz, GFP_KERNEL);
1119 lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis); 1154 lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis);
1155 if (lpi_map)
1156 col_map = kzalloc(sizeof(*col_map) * nr_lpis, GFP_KERNEL);
1120 1157
1121 if (!dev || !itt || !lpi_map) { 1158 if (!dev || !itt || !lpi_map || !col_map) {
1122 kfree(dev); 1159 kfree(dev);
1123 kfree(itt); 1160 kfree(itt);
1124 kfree(lpi_map); 1161 kfree(lpi_map);
1162 kfree(col_map);
1125 return NULL; 1163 return NULL;
1126 } 1164 }
1127 1165
1128 dev->its = its; 1166 dev->its = its;
1129 dev->itt = itt; 1167 dev->itt = itt;
1130 dev->nr_ites = nr_ites; 1168 dev->nr_ites = nr_ites;
1131 dev->lpi_map = lpi_map; 1169 dev->event_map.lpi_map = lpi_map;
1132 dev->lpi_base = lpi_base; 1170 dev->event_map.col_map = col_map;
1133 dev->nr_lpis = nr_lpis; 1171 dev->event_map.lpi_base = lpi_base;
1172 dev->event_map.nr_lpis = nr_lpis;
1134 dev->device_id = dev_id; 1173 dev->device_id = dev_id;
1135 INIT_LIST_HEAD(&dev->entry); 1174 INIT_LIST_HEAD(&dev->entry);
1136 1175
@@ -1138,10 +1177,6 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
1138 list_add(&dev->entry, &its->its_device_list); 1177 list_add(&dev->entry, &its->its_device_list);
1139 raw_spin_unlock_irqrestore(&its->lock, flags); 1178 raw_spin_unlock_irqrestore(&its->lock, flags);
1140 1179
1141 /* Bind the device to the first possible CPU */
1142 cpu = cpumask_first(cpu_online_mask);
1143 dev->collection = &its->collections[cpu];
1144
1145 /* Map device to its ITT */ 1180 /* Map device to its ITT */
1146 its_send_mapd(dev, 1); 1181 its_send_mapd(dev, 1);
1147 1182
@@ -1163,12 +1198,13 @@ static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq)
1163{ 1198{
1164 int idx; 1199 int idx;
1165 1200
1166 idx = find_first_zero_bit(dev->lpi_map, dev->nr_lpis); 1201 idx = find_first_zero_bit(dev->event_map.lpi_map,
1167 if (idx == dev->nr_lpis) 1202 dev->event_map.nr_lpis);
1203 if (idx == dev->event_map.nr_lpis)
1168 return -ENOSPC; 1204 return -ENOSPC;
1169 1205
1170 *hwirq = dev->lpi_base + idx; 1206 *hwirq = dev->event_map.lpi_base + idx;
1171 set_bit(idx, dev->lpi_map); 1207 set_bit(idx, dev->event_map.lpi_map);
1172 1208
1173 return 0; 1209 return 0;
1174} 1210}
@@ -1288,7 +1324,8 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
1288 irq_domain_set_hwirq_and_chip(domain, virq + i, 1324 irq_domain_set_hwirq_and_chip(domain, virq + i,
1289 hwirq, &its_irq_chip, its_dev); 1325 hwirq, &its_irq_chip, its_dev);
1290 dev_dbg(info->scratchpad[1].ptr, "ID:%d pID:%d vID:%d\n", 1326 dev_dbg(info->scratchpad[1].ptr, "ID:%d pID:%d vID:%d\n",
1291 (int)(hwirq - its_dev->lpi_base), (int)hwirq, virq + i); 1327 (int)(hwirq - its_dev->event_map.lpi_base),
1328 (int)hwirq, virq + i);
1292 } 1329 }
1293 1330
1294 return 0; 1331 return 0;
@@ -1300,6 +1337,9 @@ static void its_irq_domain_activate(struct irq_domain *domain,
1300 struct its_device *its_dev = irq_data_get_irq_chip_data(d); 1337 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1301 u32 event = its_get_event_id(d); 1338 u32 event = its_get_event_id(d);
1302 1339
1340 /* Bind the LPI to the first possible CPU */
1341 its_dev->event_map.col_map[event] = cpumask_first(cpu_online_mask);
1342
1303 /* Map the GIC IRQ and event to the device */ 1343 /* Map the GIC IRQ and event to the device */
1304 its_send_mapvi(its_dev, d->hwirq, event); 1344 its_send_mapvi(its_dev, d->hwirq, event);
1305} 1345}
@@ -1327,17 +1367,16 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
1327 u32 event = its_get_event_id(data); 1367 u32 event = its_get_event_id(data);
1328 1368
1329 /* Mark interrupt index as unused */ 1369 /* Mark interrupt index as unused */
1330 clear_bit(event, its_dev->lpi_map); 1370 clear_bit(event, its_dev->event_map.lpi_map);
1331 1371
1332 /* Nuke the entry in the domain */ 1372 /* Nuke the entry in the domain */
1333 irq_domain_reset_irq_data(data); 1373 irq_domain_reset_irq_data(data);
1334 } 1374 }
1335 1375
1336 /* If all interrupts have been freed, start mopping the floor */ 1376 /* If all interrupts have been freed, start mopping the floor */
1337 if (bitmap_empty(its_dev->lpi_map, its_dev->nr_lpis)) { 1377 if (bitmap_empty(its_dev->event_map.lpi_map,
1338 its_lpi_free(its_dev->lpi_map, 1378 its_dev->event_map.nr_lpis)) {
1339 its_dev->lpi_base, 1379 its_lpi_free(&its_dev->event_map);
1340 its_dev->nr_lpis);
1341 1380
1342 /* Unmap device/itt */ 1381 /* Unmap device/itt */
1343 its_send_mapd(its_dev, 0); 1382 its_send_mapd(its_dev, 0);
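Note on the irq-gic-v3-its.c hunks above: the per-device collection pointer is replaced by an event_lpi_map whose col_map records a target collection per event, so set_affinity can retarget a single LPI instead of the whole device. A simplified standalone model (types and sizes here are not the driver's own):

#include <stdio.h>
#include <stdlib.h>

struct event_lpi_map {
	unsigned long lpi_base;
	int nr_lpis;
	unsigned int *col_map;   /* event index -> target collection/CPU */
};

static unsigned int hwirq_to_event(const struct event_lpi_map *m,
				   unsigned long hwirq)
{
	return (unsigned int)(hwirq - m->lpi_base);
}

int main(void)
{
	struct event_lpi_map map = { .lpi_base = 8192, .nr_lpis = 4 };
	unsigned int ev;

	map.col_map = calloc(map.nr_lpis, sizeof(*map.col_map));
	if (!map.col_map)
		return 1;

	/* activation binds every event to CPU 0 first ... */
	for (ev = 0; ev < (unsigned int)map.nr_lpis; ev++)
		map.col_map[ev] = 0;

	/* ... and a later set_affinity retargets one event only */
	ev = hwirq_to_event(&map, 8194);
	map.col_map[ev] = 3;

	for (ev = 0; ev < (unsigned int)map.nr_lpis; ev++)
		printf("event %u -> collection %u\n", ev, map.col_map[ev]);

	free(map.col_map);
	return 0;
}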
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index b7d54d428b5e..ff4be0515a0d 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -538,7 +538,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
538 538
539static irqreturn_t ipi_call_interrupt(int irq, void *dev_id) 539static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
540{ 540{
541 smp_call_function_interrupt(); 541 generic_smp_call_function_interrupt();
542 542
543 return IRQ_HANDLED; 543 return IRQ_HANDLED;
544} 544}
diff --git a/drivers/irqchip/spear-shirq.c b/drivers/irqchip/spear-shirq.c
index a45121546caf..acb721b31bcf 100644
--- a/drivers/irqchip/spear-shirq.c
+++ b/drivers/irqchip/spear-shirq.c
@@ -2,7 +2,7 @@
2 * SPEAr platform shared irq layer source file 2 * SPEAr platform shared irq layer source file
3 * 3 *
4 * Copyright (C) 2009-2012 ST Microelectronics 4 * Copyright (C) 2009-2012 ST Microelectronics
5 * Viresh Kumar <viresh.linux@gmail.com> 5 * Viresh Kumar <vireshk@kernel.org>
6 * 6 *
7 * Copyright (C) 2012 ST Microelectronics 7 * Copyright (C) 2012 ST Microelectronics
8 * Shiraz Hashim <shiraz.linux.kernel@gmail.com> 8 * Shiraz Hashim <shiraz.linux.kernel@gmail.com>
diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
index 8c91fd5eb6fd..375be509e95f 100644
--- a/drivers/isdn/gigaset/ser-gigaset.c
+++ b/drivers/isdn/gigaset/ser-gigaset.c
@@ -524,9 +524,18 @@ gigaset_tty_open(struct tty_struct *tty)
524 cs->hw.ser->tty = tty; 524 cs->hw.ser->tty = tty;
525 atomic_set(&cs->hw.ser->refcnt, 1); 525 atomic_set(&cs->hw.ser->refcnt, 1);
526 init_completion(&cs->hw.ser->dead_cmp); 526 init_completion(&cs->hw.ser->dead_cmp);
527
528 tty->disc_data = cs; 527 tty->disc_data = cs;
529 528
529 /* Set the amount of data we're willing to receive per call
530 * from the hardware driver to half of the input buffer size
531 * to leave some reserve.
532 * Note: We don't do flow control towards the hardware driver.
533 * If more data is received than will fit into the input buffer,
534 * it will be dropped and an error will be logged. This should
535 * never happen as the device is slow and the buffer size ample.
536 */
537 tty->receive_room = RBUFSIZE/2;
538
530 /* OK.. Initialization of the datastructures and the HW is done.. Now 539 /* OK.. Initialization of the datastructures and the HW is done.. Now
531 * startup system and notify the LL that we are ready to run 540 * startup system and notify the LL that we are ready to run
532 */ 541 */
@@ -598,28 +607,6 @@ static int gigaset_tty_hangup(struct tty_struct *tty)
598} 607}
599 608
600/* 609/*
601 * Read on the tty.
602 * Unused, received data goes only to the Gigaset driver.
603 */
604static ssize_t
605gigaset_tty_read(struct tty_struct *tty, struct file *file,
606 unsigned char __user *buf, size_t count)
607{
608 return -EAGAIN;
609}
610
611/*
612 * Write on the tty.
613 * Unused, transmit data comes only from the Gigaset driver.
614 */
615static ssize_t
616gigaset_tty_write(struct tty_struct *tty, struct file *file,
617 const unsigned char *buf, size_t count)
618{
619 return -EAGAIN;
620}
621
622/*
623 * Ioctl on the tty. 610 * Ioctl on the tty.
624 * Called in process context only. 611 * Called in process context only.
625 * May be re-entered by multiple ioctl calling threads. 612 * May be re-entered by multiple ioctl calling threads.
@@ -752,8 +739,6 @@ static struct tty_ldisc_ops gigaset_ldisc = {
752 .open = gigaset_tty_open, 739 .open = gigaset_tty_open,
753 .close = gigaset_tty_close, 740 .close = gigaset_tty_close,
754 .hangup = gigaset_tty_hangup, 741 .hangup = gigaset_tty_hangup,
755 .read = gigaset_tty_read,
756 .write = gigaset_tty_write,
757 .ioctl = gigaset_tty_ioctl, 742 .ioctl = gigaset_tty_ioctl,
758 .receive_buf = gigaset_tty_receive, 743 .receive_buf = gigaset_tty_receive,
759 .write_wakeup = gigaset_tty_wakeup, 744 .write_wakeup = gigaset_tty_wakeup,
diff --git a/drivers/leds/leds-max77693.c b/drivers/leds/leds-max77693.c
index b8b0eec7b540..df348a06d8c7 100644
--- a/drivers/leds/leds-max77693.c
+++ b/drivers/leds/leds-max77693.c
@@ -13,6 +13,7 @@
13 13
14#include <linux/led-class-flash.h> 14#include <linux/led-class-flash.h>
15#include <linux/mfd/max77693.h> 15#include <linux/mfd/max77693.h>
16#include <linux/mfd/max77693-common.h>
16#include <linux/mfd/max77693-private.h> 17#include <linux/mfd/max77693-private.h>
17#include <linux/module.h> 18#include <linux/module.h>
18#include <linux/mutex.h> 19#include <linux/mutex.h>
diff --git a/drivers/macintosh/ans-lcd.c b/drivers/macintosh/ans-lcd.c
index 1a57e88a38f7..cd35079c8c98 100644
--- a/drivers/macintosh/ans-lcd.c
+++ b/drivers/macintosh/ans-lcd.c
@@ -7,7 +7,7 @@
7#include <linux/kernel.h> 7#include <linux/kernel.h>
8#include <linux/miscdevice.h> 8#include <linux/miscdevice.h>
9#include <linux/fcntl.h> 9#include <linux/fcntl.h>
10#include <linux/init.h> 10#include <linux/module.h>
11#include <linux/delay.h> 11#include <linux/delay.h>
12#include <linux/fs.h> 12#include <linux/fs.h>
13 13
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index b59727309072..bfec3bdfe598 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -259,7 +259,7 @@ config DM_CRYPT
259 the ciphers you're going to use in the cryptoapi configuration. 259 the ciphers you're going to use in the cryptoapi configuration.
260 260
261 For further information on dm-crypt and userspace tools see: 261 For further information on dm-crypt and userspace tools see:
262 <http://code.google.com/p/cryptsetup/wiki/DMCrypt> 262 <https://gitlab.com/cryptsetup/cryptsetup/wikis/DMCrypt>
263 263
264 To compile this code as a module, choose M here: the module will 264 To compile this code as a module, choose M here: the module will
265 be called dm-crypt. 265 be called dm-crypt.
diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
index a08e3eeac3c5..79a6d63e8ed3 100644
--- a/drivers/md/bcache/closure.h
+++ b/drivers/md/bcache/closure.h
@@ -320,7 +320,6 @@ static inline void closure_wake_up(struct closure_waitlist *list)
320do { \ 320do { \
321 set_closure_fn(_cl, _fn, _wq); \ 321 set_closure_fn(_cl, _fn, _wq); \
322 closure_sub(_cl, CLOSURE_RUNNING + 1); \ 322 closure_sub(_cl, CLOSURE_RUNNING + 1); \
323 return; \
324} while (0) 323} while (0)
325 324
326/** 325/**
@@ -349,7 +348,6 @@ do { \
349do { \ 348do { \
350 set_closure_fn(_cl, _fn, _wq); \ 349 set_closure_fn(_cl, _fn, _wq); \
351 closure_queue(_cl); \ 350 closure_queue(_cl); \
352 return; \
353} while (0) 351} while (0)
354 352
355/** 353/**
@@ -365,7 +363,6 @@ do { \
365do { \ 363do { \
366 set_closure_fn(_cl, _destructor, NULL); \ 364 set_closure_fn(_cl, _destructor, NULL); \
367 closure_sub(_cl, CLOSURE_RUNNING - CLOSURE_DESTRUCTOR + 1); \ 365 closure_sub(_cl, CLOSURE_RUNNING - CLOSURE_DESTRUCTOR + 1); \
368 return; \
369} while (0) 366} while (0)
370 367
371/** 368/**
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index cb64e64a4789..bf6a9ca18403 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -105,6 +105,7 @@ void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p)
105 } while (n != bio); 105 } while (n != bio);
106 106
107 continue_at(&s->cl, bch_bio_submit_split_done, NULL); 107 continue_at(&s->cl, bch_bio_submit_split_done, NULL);
108 return;
108submit: 109submit:
109 generic_make_request(bio); 110 generic_make_request(bio);
110} 111}
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index ce64fc851251..418607a6ba33 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -592,12 +592,14 @@ static void journal_write_unlocked(struct closure *cl)
592 592
593 if (!w->need_write) { 593 if (!w->need_write) {
594 closure_return_with_destructor(cl, journal_write_unlock); 594 closure_return_with_destructor(cl, journal_write_unlock);
595 return;
595 } else if (journal_full(&c->journal)) { 596 } else if (journal_full(&c->journal)) {
596 journal_reclaim(c); 597 journal_reclaim(c);
597 spin_unlock(&c->journal.lock); 598 spin_unlock(&c->journal.lock);
598 599
599 btree_flush_write(c); 600 btree_flush_write(c);
600 continue_at(cl, journal_write, system_wq); 601 continue_at(cl, journal_write, system_wq);
602 return;
601 } 603 }
602 604
603 c->journal.blocks_free -= set_blocks(w->data, block_bytes(c)); 605 c->journal.blocks_free -= set_blocks(w->data, block_bytes(c));
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 4afb2d26b148..f292790997d7 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -88,8 +88,10 @@ static void bch_data_insert_keys(struct closure *cl)
88 if (journal_ref) 88 if (journal_ref)
89 atomic_dec_bug(journal_ref); 89 atomic_dec_bug(journal_ref);
90 90
91 if (!op->insert_data_done) 91 if (!op->insert_data_done) {
92 continue_at(cl, bch_data_insert_start, op->wq); 92 continue_at(cl, bch_data_insert_start, op->wq);
93 return;
94 }
93 95
94 bch_keylist_free(&op->insert_keys); 96 bch_keylist_free(&op->insert_keys);
95 closure_return(cl); 97 closure_return(cl);
@@ -216,8 +218,10 @@ static void bch_data_insert_start(struct closure *cl)
216 /* 1 for the device pointer and 1 for the chksum */ 218 /* 1 for the device pointer and 1 for the chksum */
217 if (bch_keylist_realloc(&op->insert_keys, 219 if (bch_keylist_realloc(&op->insert_keys,
218 3 + (op->csum ? 1 : 0), 220 3 + (op->csum ? 1 : 0),
219 op->c)) 221 op->c)) {
220 continue_at(cl, bch_data_insert_keys, op->wq); 222 continue_at(cl, bch_data_insert_keys, op->wq);
223 return;
224 }
221 225
222 k = op->insert_keys.top; 226 k = op->insert_keys.top;
223 bkey_init(k); 227 bkey_init(k);
@@ -255,6 +259,7 @@ static void bch_data_insert_start(struct closure *cl)
255 259
256 op->insert_data_done = true; 260 op->insert_data_done = true;
257 continue_at(cl, bch_data_insert_keys, op->wq); 261 continue_at(cl, bch_data_insert_keys, op->wq);
262 return;
258err: 263err:
259 /* bch_alloc_sectors() blocks if s->writeback = true */ 264 /* bch_alloc_sectors() blocks if s->writeback = true */
260 BUG_ON(op->writeback); 265 BUG_ON(op->writeback);
@@ -576,8 +581,10 @@ static void cache_lookup(struct closure *cl)
576 ret = bch_btree_map_keys(&s->op, s->iop.c, 581 ret = bch_btree_map_keys(&s->op, s->iop.c,
577 &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0), 582 &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
578 cache_lookup_fn, MAP_END_KEY); 583 cache_lookup_fn, MAP_END_KEY);
579 if (ret == -EAGAIN) 584 if (ret == -EAGAIN) {
580 continue_at(cl, cache_lookup, bcache_wq); 585 continue_at(cl, cache_lookup, bcache_wq);
586 return;
587 }
581 588
582 closure_return(cl); 589 closure_return(cl);
583} 590}
@@ -1085,6 +1092,7 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
1085 continue_at_nobarrier(&s->cl, 1092 continue_at_nobarrier(&s->cl,
1086 flash_dev_nodata, 1093 flash_dev_nodata,
1087 bcache_wq); 1094 bcache_wq);
1095 return;
1088 } else if (rw) { 1096 } else if (rw) {
1089 bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, 1097 bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
1090 &KEY(d->id, bio->bi_iter.bi_sector, 0), 1098 &KEY(d->id, bio->bi_iter.bi_sector, 0),
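Note on the bcache hunks above (closure.h, io.c, journal.c, request.c): the continue_at()-style macros no longer hide a return statement, so every caller that schedules a continuation must return explicitly or execution falls through into the code below the call. A small standalone illustration of that hazard (the macro and names are made up, not the bcache API):

#include <stdio.h>

static const char *next_step;

/* Like the patched closure.h macros: only schedules, never returns. */
#define continue_at(step) \
	do { \
		next_step = (step); \
	} while (0)

static void insert_keys(int more_data)
{
	if (more_data) {
		continue_at("bch_data_insert_start");
		return;                 /* explicit return now required */
	}

	next_step = "done";
}

int main(void)
{
	insert_keys(1);
	printf("scheduled: %s\n", next_step);
	insert_keys(0);
	printf("scheduled: %s\n", next_step);
	return 0;
}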
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index ed2346ddf4c9..e51de52eeb94 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -494,7 +494,7 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap)
494 bitmap_super_t *sb; 494 bitmap_super_t *sb;
495 unsigned long chunksize, daemon_sleep, write_behind; 495 unsigned long chunksize, daemon_sleep, write_behind;
496 496
497 bitmap->storage.sb_page = alloc_page(GFP_KERNEL); 497 bitmap->storage.sb_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
498 if (bitmap->storage.sb_page == NULL) 498 if (bitmap->storage.sb_page == NULL)
499 return -ENOMEM; 499 return -ENOMEM;
500 bitmap->storage.sb_page->index = 0; 500 bitmap->storage.sb_page->index = 0;
@@ -541,6 +541,7 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap)
541 sb->state = cpu_to_le32(bitmap->flags); 541 sb->state = cpu_to_le32(bitmap->flags);
542 bitmap->events_cleared = bitmap->mddev->events; 542 bitmap->events_cleared = bitmap->mddev->events;
543 sb->events_cleared = cpu_to_le64(bitmap->mddev->events); 543 sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
544 bitmap->mddev->bitmap_info.nodes = 0;
544 545
545 kunmap_atomic(sb); 546 kunmap_atomic(sb);
546 547
@@ -558,6 +559,7 @@ static int bitmap_read_sb(struct bitmap *bitmap)
558 unsigned long sectors_reserved = 0; 559 unsigned long sectors_reserved = 0;
559 int err = -EINVAL; 560 int err = -EINVAL;
560 struct page *sb_page; 561 struct page *sb_page;
562 loff_t offset = bitmap->mddev->bitmap_info.offset;
561 563
562 if (!bitmap->storage.file && !bitmap->mddev->bitmap_info.offset) { 564 if (!bitmap->storage.file && !bitmap->mddev->bitmap_info.offset) {
563 chunksize = 128 * 1024 * 1024; 565 chunksize = 128 * 1024 * 1024;
@@ -584,9 +586,9 @@ re_read:
584 bm_blocks = ((bm_blocks+7) >> 3) + sizeof(bitmap_super_t); 586 bm_blocks = ((bm_blocks+7) >> 3) + sizeof(bitmap_super_t);
585 /* to 4k blocks */ 587 /* to 4k blocks */
586 bm_blocks = DIV_ROUND_UP_SECTOR_T(bm_blocks, 4096); 588 bm_blocks = DIV_ROUND_UP_SECTOR_T(bm_blocks, 4096);
587 bitmap->mddev->bitmap_info.offset += bitmap->cluster_slot * (bm_blocks << 3); 589 offset = bitmap->mddev->bitmap_info.offset + (bitmap->cluster_slot * (bm_blocks << 3));
588 pr_info("%s:%d bm slot: %d offset: %llu\n", __func__, __LINE__, 590 pr_info("%s:%d bm slot: %d offset: %llu\n", __func__, __LINE__,
589 bitmap->cluster_slot, (unsigned long long)bitmap->mddev->bitmap_info.offset); 591 bitmap->cluster_slot, offset);
590 } 592 }
591 593
592 if (bitmap->storage.file) { 594 if (bitmap->storage.file) {
@@ -597,7 +599,7 @@ re_read:
597 bitmap, bytes, sb_page); 599 bitmap, bytes, sb_page);
598 } else { 600 } else {
599 err = read_sb_page(bitmap->mddev, 601 err = read_sb_page(bitmap->mddev,
600 bitmap->mddev->bitmap_info.offset, 602 offset,
601 sb_page, 603 sb_page,
602 0, sizeof(bitmap_super_t)); 604 0, sizeof(bitmap_super_t));
603 } 605 }
@@ -611,8 +613,16 @@ re_read:
611 daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ; 613 daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ;
612 write_behind = le32_to_cpu(sb->write_behind); 614 write_behind = le32_to_cpu(sb->write_behind);
613 sectors_reserved = le32_to_cpu(sb->sectors_reserved); 615 sectors_reserved = le32_to_cpu(sb->sectors_reserved);
614 nodes = le32_to_cpu(sb->nodes); 616 /* XXX: This is a hack to ensure that we don't use clustering
615 strlcpy(bitmap->mddev->bitmap_info.cluster_name, sb->cluster_name, 64); 617 * in case:
618 * - dm-raid is in use and
619 * - the nodes written in bitmap_sb is erroneous.
620 */
621 if (!bitmap->mddev->sync_super) {
622 nodes = le32_to_cpu(sb->nodes);
623 strlcpy(bitmap->mddev->bitmap_info.cluster_name,
624 sb->cluster_name, 64);
625 }
616 626
617 /* verify that the bitmap-specific fields are valid */ 627 /* verify that the bitmap-specific fields are valid */
618 if (sb->magic != cpu_to_le32(BITMAP_MAGIC)) 628 if (sb->magic != cpu_to_le32(BITMAP_MAGIC))
@@ -671,7 +681,7 @@ out:
671 kunmap_atomic(sb); 681 kunmap_atomic(sb);
672 /* Assiging chunksize is required for "re_read" */ 682 /* Assiging chunksize is required for "re_read" */
673 bitmap->mddev->bitmap_info.chunksize = chunksize; 683 bitmap->mddev->bitmap_info.chunksize = chunksize;
674 if (nodes && (bitmap->cluster_slot < 0)) { 684 if (err == 0 && nodes && (bitmap->cluster_slot < 0)) {
675 err = md_setup_cluster(bitmap->mddev, nodes); 685 err = md_setup_cluster(bitmap->mddev, nodes);
676 if (err) { 686 if (err) {
677 pr_err("%s: Could not setup cluster service (%d)\n", 687 pr_err("%s: Could not setup cluster service (%d)\n",
@@ -1866,10 +1876,6 @@ int bitmap_copy_from_slot(struct mddev *mddev, int slot,
1866 if (IS_ERR(bitmap)) 1876 if (IS_ERR(bitmap))
1867 return PTR_ERR(bitmap); 1877 return PTR_ERR(bitmap);
1868 1878
1869 rv = bitmap_read_sb(bitmap);
1870 if (rv)
1871 goto err;
1872
1873 rv = bitmap_init_from_disk(bitmap, 0); 1879 rv = bitmap_init_from_disk(bitmap, 0);
1874 if (rv) 1880 if (rv)
1875 goto err; 1881 goto err;
diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c
index 32814371b8d3..aa1b41ca40f7 100644
--- a/drivers/md/dm-cache-policy-mq.c
+++ b/drivers/md/dm-cache-policy-mq.c
@@ -1471,5 +1471,3 @@ module_exit(mq_exit);
1471MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); 1471MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
1472MODULE_LICENSE("GPL"); 1472MODULE_LICENSE("GPL");
1473MODULE_DESCRIPTION("mq cache policy"); 1473MODULE_DESCRIPTION("mq cache policy");
1474
1475MODULE_ALIAS("dm-cache-default");
diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
index b6f22651dd35..200366c62231 100644
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -1686,7 +1686,7 @@ static struct dm_cache_policy *smq_create(dm_cblock_t cache_size,
1686 1686
1687 if (from_cblock(cache_size)) { 1687 if (from_cblock(cache_size)) {
1688 mq->cache_hit_bits = alloc_bitset(from_cblock(cache_size)); 1688 mq->cache_hit_bits = alloc_bitset(from_cblock(cache_size));
1689 if (!mq->cache_hit_bits && mq->cache_hit_bits) { 1689 if (!mq->cache_hit_bits) {
1690 DMERR("couldn't allocate cache hit bitset"); 1690 DMERR("couldn't allocate cache hit bitset");
1691 goto bad_cache_hit_bits; 1691 goto bad_cache_hit_bits;
1692 } 1692 }
@@ -1789,3 +1789,5 @@ module_exit(smq_exit);
1789MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); 1789MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
1790MODULE_LICENSE("GPL"); 1790MODULE_LICENSE("GPL");
1791MODULE_DESCRIPTION("smq cache policy"); 1791MODULE_DESCRIPTION("smq cache policy");
1792
1793MODULE_ALIAS("dm-cache-default");
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 1b4e1756b169..1fe93cfea7d3 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -1947,6 +1947,7 @@ static int commit_if_needed(struct cache *cache)
1947 1947
1948static void process_deferred_bios(struct cache *cache) 1948static void process_deferred_bios(struct cache *cache)
1949{ 1949{
1950 bool prealloc_used = false;
1950 unsigned long flags; 1951 unsigned long flags;
1951 struct bio_list bios; 1952 struct bio_list bios;
1952 struct bio *bio; 1953 struct bio *bio;
@@ -1966,6 +1967,7 @@ static void process_deferred_bios(struct cache *cache)
1966 * this bio might require one, we pause until there are some 1967 * this bio might require one, we pause until there are some
1967 * prepared mappings to process. 1968 * prepared mappings to process.
1968 */ 1969 */
1970 prealloc_used = true;
1969 if (prealloc_data_structs(cache, &structs)) { 1971 if (prealloc_data_structs(cache, &structs)) {
1970 spin_lock_irqsave(&cache->lock, flags); 1972 spin_lock_irqsave(&cache->lock, flags);
1971 bio_list_merge(&cache->deferred_bios, &bios); 1973 bio_list_merge(&cache->deferred_bios, &bios);
@@ -1983,11 +1985,13 @@ static void process_deferred_bios(struct cache *cache)
1983 process_bio(cache, &structs, bio); 1985 process_bio(cache, &structs, bio);
1984 } 1986 }
1985 1987
1986 prealloc_free_structs(cache, &structs); 1988 if (prealloc_used)
1989 prealloc_free_structs(cache, &structs);
1987} 1990}
1988 1991
1989static void process_deferred_cells(struct cache *cache) 1992static void process_deferred_cells(struct cache *cache)
1990{ 1993{
1994 bool prealloc_used = false;
1991 unsigned long flags; 1995 unsigned long flags;
1992 struct dm_bio_prison_cell *cell, *tmp; 1996 struct dm_bio_prison_cell *cell, *tmp;
1993 struct list_head cells; 1997 struct list_head cells;
@@ -2007,6 +2011,7 @@ static void process_deferred_cells(struct cache *cache)
2007 * this bio might require one, we pause until there are some 2011 * this bio might require one, we pause until there are some
2008 * prepared mappings to process. 2012 * prepared mappings to process.
2009 */ 2013 */
2014 prealloc_used = true;
2010 if (prealloc_data_structs(cache, &structs)) { 2015 if (prealloc_data_structs(cache, &structs)) {
2011 spin_lock_irqsave(&cache->lock, flags); 2016 spin_lock_irqsave(&cache->lock, flags);
2012 list_splice(&cells, &cache->deferred_cells); 2017 list_splice(&cells, &cache->deferred_cells);
@@ -2017,7 +2022,8 @@ static void process_deferred_cells(struct cache *cache)
2017 process_cell(cache, &structs, cell); 2022 process_cell(cache, &structs, cell);
2018 } 2023 }
2019 2024
2020 prealloc_free_structs(cache, &structs); 2025 if (prealloc_used)
2026 prealloc_free_structs(cache, &structs);
2021} 2027}
2022 2028
2023static void process_deferred_flush_bios(struct cache *cache, bool submit_bios) 2029static void process_deferred_flush_bios(struct cache *cache, bool submit_bios)
@@ -2062,7 +2068,7 @@ static void process_deferred_writethrough_bios(struct cache *cache)
2062 2068
2063static void writeback_some_dirty_blocks(struct cache *cache) 2069static void writeback_some_dirty_blocks(struct cache *cache)
2064{ 2070{
2065 int r = 0; 2071 bool prealloc_used = false;
2066 dm_oblock_t oblock; 2072 dm_oblock_t oblock;
2067 dm_cblock_t cblock; 2073 dm_cblock_t cblock;
2068 struct prealloc structs; 2074 struct prealloc structs;
@@ -2072,15 +2078,12 @@ static void writeback_some_dirty_blocks(struct cache *cache)
2072 memset(&structs, 0, sizeof(structs)); 2078 memset(&structs, 0, sizeof(structs));
2073 2079
2074 while (spare_migration_bandwidth(cache)) { 2080 while (spare_migration_bandwidth(cache)) {
2075 if (prealloc_data_structs(cache, &structs)) 2081 if (policy_writeback_work(cache->policy, &oblock, &cblock, busy))
2076 break; 2082 break; /* no work to do */
2077
2078 r = policy_writeback_work(cache->policy, &oblock, &cblock, busy);
2079 if (r)
2080 break;
2081 2083
2082 r = get_cell(cache, oblock, &structs, &old_ocell); 2084 prealloc_used = true;
2083 if (r) { 2085 if (prealloc_data_structs(cache, &structs) ||
2086 get_cell(cache, oblock, &structs, &old_ocell)) {
2084 policy_set_dirty(cache->policy, oblock); 2087 policy_set_dirty(cache->policy, oblock);
2085 break; 2088 break;
2086 } 2089 }
@@ -2088,7 +2091,8 @@ static void writeback_some_dirty_blocks(struct cache *cache)
2088 writeback(cache, &structs, oblock, cblock, old_ocell); 2091 writeback(cache, &structs, oblock, cblock, old_ocell);
2089 } 2092 }
2090 2093
2091 prealloc_free_structs(cache, &structs); 2094 if (prealloc_used)
2095 prealloc_free_structs(cache, &structs);
2092} 2096}
2093 2097
2094/*---------------------------------------------------------------- 2098/*----------------------------------------------------------------
@@ -3496,7 +3500,7 @@ static void cache_resume(struct dm_target *ti)
3496 * <#demotions> <#promotions> <#dirty> 3500 * <#demotions> <#promotions> <#dirty>
3497 * <#features> <features>* 3501 * <#features> <features>*
3498 * <#core args> <core args> 3502 * <#core args> <core args>
3499 * <policy name> <#policy args> <policy args>* <cache metadata mode> 3503 * <policy name> <#policy args> <policy args>* <cache metadata mode> <needs_check>
3500 */ 3504 */
3501static void cache_status(struct dm_target *ti, status_type_t type, 3505static void cache_status(struct dm_target *ti, status_type_t type,
3502 unsigned status_flags, char *result, unsigned maxlen) 3506 unsigned status_flags, char *result, unsigned maxlen)
@@ -3582,6 +3586,11 @@ static void cache_status(struct dm_target *ti, status_type_t type,
3582 else 3586 else
3583 DMEMIT("rw "); 3587 DMEMIT("rw ");
3584 3588
3589 if (dm_cache_metadata_needs_check(cache->cmd))
3590 DMEMIT("needs_check ");
3591 else
3592 DMEMIT("- ");
3593
3585 break; 3594 break;
3586 3595
3587 case STATUSTYPE_TABLE: 3596 case STATUSTYPE_TABLE:
@@ -3820,7 +3829,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
3820 3829
3821static struct target_type cache_target = { 3830static struct target_type cache_target = {
3822 .name = "cache", 3831 .name = "cache",
3823 .version = {1, 7, 0}, 3832 .version = {1, 8, 0},
3824 .module = THIS_MODULE, 3833 .module = THIS_MODULE,
3825 .ctr = cache_ctr, 3834 .ctr = cache_ctr,
3826 .dtr = cache_dtr, 3835 .dtr = cache_dtr,
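Note on the dm-cache-target.c hunks above: the prealloc structures are only zeroed up front, so a prealloc_used flag records whether the loop ever reached the allocation step and the free call is skipped otherwise. A minimal standalone model of that guard (illustrative names only):

#include <stdbool.h>
#include <stdio.h>

struct prealloc { int allocated; };

static void prealloc_structs(struct prealloc *p) { p->allocated = 1; }

static void prealloc_free(struct prealloc *p)
{
	printf("freeing %d preallocated item(s)\n", p->allocated);
}

static void process(int nr_items)
{
	bool prealloc_used = false;
	struct prealloc structs = { 0 };
	int i;

	for (i = 0; i < nr_items; i++) {
		prealloc_used = true;
		prealloc_structs(&structs);
		/* ... process one item ... */
	}

	if (prealloc_used)      /* nothing to free if the loop never ran */
		prealloc_free(&structs);
}

int main(void)
{
	process(0);     /* no allocation, no free */
	process(3);
	return 0;
}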
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 48dfe3c4d6aa..6ba47cfb1443 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -1293,8 +1293,8 @@ static int __release_metadata_snap(struct dm_pool_metadata *pmd)
1293 return r; 1293 return r;
1294 1294
1295 disk_super = dm_block_data(copy); 1295 disk_super = dm_block_data(copy);
1296 dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->data_mapping_root)); 1296 dm_btree_del(&pmd->info, le64_to_cpu(disk_super->data_mapping_root));
1297 dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->device_details_root)); 1297 dm_btree_del(&pmd->details_info, le64_to_cpu(disk_super->device_details_root));
1298 dm_sm_dec_block(pmd->metadata_sm, held_root); 1298 dm_sm_dec_block(pmd->metadata_sm, held_root);
1299 1299
1300 return dm_tm_unlock(pmd->tm, copy); 1300 return dm_tm_unlock(pmd->tm, copy);
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index c33f61a4cc28..d2bbe8cc1e97 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -18,6 +18,7 @@
18#include <linux/init.h> 18#include <linux/init.h>
19#include <linux/module.h> 19#include <linux/module.h>
20#include <linux/slab.h> 20#include <linux/slab.h>
21#include <linux/vmalloc.h>
21#include <linux/sort.h> 22#include <linux/sort.h>
22#include <linux/rbtree.h> 23#include <linux/rbtree.h>
23 24
@@ -268,7 +269,7 @@ struct pool {
268 process_mapping_fn process_prepared_mapping; 269 process_mapping_fn process_prepared_mapping;
269 process_mapping_fn process_prepared_discard; 270 process_mapping_fn process_prepared_discard;
270 271
271 struct dm_bio_prison_cell *cell_sort_array[CELL_SORT_ARRAY_SIZE]; 272 struct dm_bio_prison_cell **cell_sort_array;
272}; 273};
273 274
274static enum pool_mode get_pool_mode(struct pool *pool); 275static enum pool_mode get_pool_mode(struct pool *pool);
@@ -665,16 +666,21 @@ static void requeue_io(struct thin_c *tc)
665 requeue_deferred_cells(tc); 666 requeue_deferred_cells(tc);
666} 667}
667 668
668static void error_retry_list(struct pool *pool) 669static void error_retry_list_with_code(struct pool *pool, int error)
669{ 670{
670 struct thin_c *tc; 671 struct thin_c *tc;
671 672
672 rcu_read_lock(); 673 rcu_read_lock();
673 list_for_each_entry_rcu(tc, &pool->active_thins, list) 674 list_for_each_entry_rcu(tc, &pool->active_thins, list)
674 error_thin_bio_list(tc, &tc->retry_on_resume_list, -EIO); 675 error_thin_bio_list(tc, &tc->retry_on_resume_list, error);
675 rcu_read_unlock(); 676 rcu_read_unlock();
676} 677}
677 678
679static void error_retry_list(struct pool *pool)
680{
681 return error_retry_list_with_code(pool, -EIO);
682}
683
678/* 684/*
679 * This section of code contains the logic for processing a thin device's IO. 685 * This section of code contains the logic for processing a thin device's IO.
680 * Much of the code depends on pool object resources (lists, workqueues, etc) 686 * Much of the code depends on pool object resources (lists, workqueues, etc)
@@ -2281,18 +2287,23 @@ static void do_waker(struct work_struct *ws)
2281 queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD); 2287 queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
2282} 2288}
2283 2289
2290static void notify_of_pool_mode_change_to_oods(struct pool *pool);
2291
2284/* 2292/*
2285 * We're holding onto IO to allow userland time to react. After the 2293 * We're holding onto IO to allow userland time to react. After the
2286 * timeout either the pool will have been resized (and thus back in 2294 * timeout either the pool will have been resized (and thus back in
2287 * PM_WRITE mode), or we degrade to PM_READ_ONLY and start erroring IO. 2295 * PM_WRITE mode), or we degrade to PM_OUT_OF_DATA_SPACE w/ error_if_no_space.
2288 */ 2296 */
2289static void do_no_space_timeout(struct work_struct *ws) 2297static void do_no_space_timeout(struct work_struct *ws)
2290{ 2298{
2291 struct pool *pool = container_of(to_delayed_work(ws), struct pool, 2299 struct pool *pool = container_of(to_delayed_work(ws), struct pool,
2292 no_space_timeout); 2300 no_space_timeout);
2293 2301
2294 if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) 2302 if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) {
2295 set_pool_mode(pool, PM_READ_ONLY); 2303 pool->pf.error_if_no_space = true;
2304 notify_of_pool_mode_change_to_oods(pool);
2305 error_retry_list_with_code(pool, -ENOSPC);
2306 }
2296} 2307}
2297 2308
2298/*----------------------------------------------------------------*/ 2309/*----------------------------------------------------------------*/
@@ -2370,6 +2381,14 @@ static void notify_of_pool_mode_change(struct pool *pool, const char *new_mode)
2370 dm_device_name(pool->pool_md), new_mode); 2381 dm_device_name(pool->pool_md), new_mode);
2371} 2382}
2372 2383
2384static void notify_of_pool_mode_change_to_oods(struct pool *pool)
2385{
2386 if (!pool->pf.error_if_no_space)
2387 notify_of_pool_mode_change(pool, "out-of-data-space (queue IO)");
2388 else
2389 notify_of_pool_mode_change(pool, "out-of-data-space (error IO)");
2390}
2391
2373static bool passdown_enabled(struct pool_c *pt) 2392static bool passdown_enabled(struct pool_c *pt)
2374{ 2393{
2375 return pt->adjusted_pf.discard_passdown; 2394 return pt->adjusted_pf.discard_passdown;
@@ -2454,7 +2473,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
2454 * frequently seeing this mode. 2473 * frequently seeing this mode.
2455 */ 2474 */
2456 if (old_mode != new_mode) 2475 if (old_mode != new_mode)
2457 notify_of_pool_mode_change(pool, "out-of-data-space"); 2476 notify_of_pool_mode_change_to_oods(pool);
2458 pool->process_bio = process_bio_read_only; 2477 pool->process_bio = process_bio_read_only;
2459 pool->process_discard = process_discard_bio; 2478 pool->process_discard = process_discard_bio;
2460 pool->process_cell = process_cell_read_only; 2479 pool->process_cell = process_cell_read_only;
@@ -2777,6 +2796,7 @@ static void __pool_destroy(struct pool *pool)
2777{ 2796{
2778 __pool_table_remove(pool); 2797 __pool_table_remove(pool);
2779 2798
2799 vfree(pool->cell_sort_array);
2780 if (dm_pool_metadata_close(pool->pmd) < 0) 2800 if (dm_pool_metadata_close(pool->pmd) < 0)
2781 DMWARN("%s: dm_pool_metadata_close() failed.", __func__); 2801 DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
2782 2802
@@ -2889,6 +2909,13 @@ static struct pool *pool_create(struct mapped_device *pool_md,
2889 goto bad_mapping_pool; 2909 goto bad_mapping_pool;
2890 } 2910 }
2891 2911
2912 pool->cell_sort_array = vmalloc(sizeof(*pool->cell_sort_array) * CELL_SORT_ARRAY_SIZE);
2913 if (!pool->cell_sort_array) {
2914 *error = "Error allocating cell sort array";
2915 err_p = ERR_PTR(-ENOMEM);
2916 goto bad_sort_array;
2917 }
2918
2892 pool->ref_count = 1; 2919 pool->ref_count = 1;
2893 pool->last_commit_jiffies = jiffies; 2920 pool->last_commit_jiffies = jiffies;
2894 pool->pool_md = pool_md; 2921 pool->pool_md = pool_md;
@@ -2897,6 +2924,8 @@ static struct pool *pool_create(struct mapped_device *pool_md,
2897 2924
2898 return pool; 2925 return pool;
2899 2926
2927bad_sort_array:
2928 mempool_destroy(pool->mapping_pool);
2900bad_mapping_pool: 2929bad_mapping_pool:
2901 dm_deferred_set_destroy(pool->all_io_ds); 2930 dm_deferred_set_destroy(pool->all_io_ds);
2902bad_all_io_ds: 2931bad_all_io_ds:
@@ -3714,6 +3743,7 @@ static void emit_flags(struct pool_features *pf, char *result,
3714 * Status line is: 3743 * Status line is:
3715 * <transaction id> <used metadata sectors>/<total metadata sectors> 3744 * <transaction id> <used metadata sectors>/<total metadata sectors>
3716 * <used data sectors>/<total data sectors> <held metadata root> 3745 * <used data sectors>/<total data sectors> <held metadata root>
3746 * <pool mode> <discard config> <no space config> <needs_check>
3717 */ 3747 */
3718static void pool_status(struct dm_target *ti, status_type_t type, 3748static void pool_status(struct dm_target *ti, status_type_t type,
3719 unsigned status_flags, char *result, unsigned maxlen) 3749 unsigned status_flags, char *result, unsigned maxlen)
@@ -3815,6 +3845,11 @@ static void pool_status(struct dm_target *ti, status_type_t type,
3815 else 3845 else
3816 DMEMIT("queue_if_no_space "); 3846 DMEMIT("queue_if_no_space ");
3817 3847
3848 if (dm_pool_metadata_needs_check(pool->pmd))
3849 DMEMIT("needs_check ");
3850 else
3851 DMEMIT("- ");
3852
3818 break; 3853 break;
3819 3854
3820 case STATUSTYPE_TABLE: 3855 case STATUSTYPE_TABLE:
@@ -3918,7 +3953,7 @@ static struct target_type pool_target = {
3918 .name = "thin-pool", 3953 .name = "thin-pool",
3919 .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE | 3954 .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
3920 DM_TARGET_IMMUTABLE, 3955 DM_TARGET_IMMUTABLE,
3921 .version = {1, 15, 0}, 3956 .version = {1, 16, 0},
3922 .module = THIS_MODULE, 3957 .module = THIS_MODULE,
3923 .ctr = pool_ctr, 3958 .ctr = pool_ctr,
3924 .dtr = pool_dtr, 3959 .dtr = pool_dtr,
@@ -4305,7 +4340,7 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
4305 4340
4306static struct target_type thin_target = { 4341static struct target_type thin_target = {
4307 .name = "thin", 4342 .name = "thin",
4308 .version = {1, 15, 0}, 4343 .version = {1, 16, 0},
4309 .module = THIS_MODULE, 4344 .module = THIS_MODULE,
4310 .ctr = thin_ctr, 4345 .ctr = thin_ctr,
4311 .dtr = thin_dtr, 4346 .dtr = thin_dtr,
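
The thin-pool hunks above move cell_sort_array out of struct pool, allocate it with vmalloc() in pool_create() behind a new bad_sort_array unwind label, and vfree() it in __pool_destroy(). A minimal user-space sketch of the same allocate/unwind/free shape (malloc/free stand in for vmalloc/vfree; every name below is illustrative, not the driver's):

	#include <stdio.h>
	#include <stdlib.h>

	#define CELL_SORT_ARRAY_SIZE 8192

	struct cell;				/* opaque here */

	struct pool {
		struct cell **cell_sort_array;	/* large array behind a pointer */
		void *mapping_pool;		/* stand-in for the mapping mempool */
	};

	static struct pool *pool_create(void)
	{
		struct pool *pool = calloc(1, sizeof(*pool));

		if (!pool)
			return NULL;

		pool->mapping_pool = malloc(1024);
		if (!pool->mapping_pool)
			goto bad_mapping_pool;

		pool->cell_sort_array =
			malloc(sizeof(*pool->cell_sort_array) * CELL_SORT_ARRAY_SIZE);
		if (!pool->cell_sort_array)
			goto bad_sort_array;

		return pool;

	bad_sort_array:				/* newest allocation failed... */
		free(pool->mapping_pool);	/* ...so unwind the older ones */
	bad_mapping_pool:
		free(pool);
		return NULL;
	}

	static void pool_destroy(struct pool *pool)
	{
		free(pool->cell_sort_array);
		free(pool->mapping_pool);
		free(pool);
	}

	int main(void)
	{
		struct pool *pool = pool_create();

		if (!pool)
			return 1;
		pool_destroy(pool);
		return 0;
	}

Each allocation gets exactly one matching unwind label, so a failure part-way through tears down only what was already set up.
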
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index f331d888e7f5..0d7ab20c58df 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1067,13 +1067,10 @@ static void rq_end_stats(struct mapped_device *md, struct request *orig)
1067 */ 1067 */
1068static void rq_completed(struct mapped_device *md, int rw, bool run_queue) 1068static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
1069{ 1069{
1070 int nr_requests_pending;
1071
1072 atomic_dec(&md->pending[rw]); 1070 atomic_dec(&md->pending[rw]);
1073 1071
1074 /* nudge anyone waiting on suspend queue */ 1072 /* nudge anyone waiting on suspend queue */
1075 nr_requests_pending = md_in_flight(md); 1073 if (!md_in_flight(md))
1076 if (!nr_requests_pending)
1077 wake_up(&md->wait); 1074 wake_up(&md->wait);
1078 1075
1079 /* 1076 /*
@@ -1085,8 +1082,7 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
1085 if (run_queue) { 1082 if (run_queue) {
1086 if (md->queue->mq_ops) 1083 if (md->queue->mq_ops)
1087 blk_mq_run_hw_queues(md->queue, true); 1084 blk_mq_run_hw_queues(md->queue, true);
1088 else if (!nr_requests_pending || 1085 else
1089 (nr_requests_pending >= md->queue->nr_congestion_on))
1090 blk_run_queue_async(md->queue); 1086 blk_run_queue_async(md->queue);
1091 } 1087 }
1092 1088
@@ -1733,7 +1729,8 @@ static int dm_merge_bvec(struct request_queue *q,
1733 struct mapped_device *md = q->queuedata; 1729 struct mapped_device *md = q->queuedata;
1734 struct dm_table *map = dm_get_live_table_fast(md); 1730 struct dm_table *map = dm_get_live_table_fast(md);
1735 struct dm_target *ti; 1731 struct dm_target *ti;
1736 sector_t max_sectors, max_size = 0; 1732 sector_t max_sectors;
1733 int max_size = 0;
1737 1734
1738 if (unlikely(!map)) 1735 if (unlikely(!map))
1739 goto out; 1736 goto out;
@@ -1746,18 +1743,10 @@ static int dm_merge_bvec(struct request_queue *q,
1746 * Find maximum amount of I/O that won't need splitting 1743 * Find maximum amount of I/O that won't need splitting
1747 */ 1744 */
1748 max_sectors = min(max_io_len(bvm->bi_sector, ti), 1745 max_sectors = min(max_io_len(bvm->bi_sector, ti),
1749 (sector_t) queue_max_sectors(q)); 1746 (sector_t) BIO_MAX_SECTORS);
1750 max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size; 1747 max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
1751 1748 if (max_size < 0)
1752 /* 1749 max_size = 0;
1753 * FIXME: this stop-gap fix _must_ be cleaned up (by passing a sector_t
1754 * to the targets' merge function since it holds sectors not bytes).
1755 * Just doing this as an interim fix for stable@ because the more
1756 * comprehensive cleanup of switching to sector_t will impact every
1757 * DM target that implements a ->merge hook.
1758 */
1759 if (max_size > INT_MAX)
1760 max_size = INT_MAX;
1761 1750
1762 /* 1751 /*
1763 * merge_bvec_fn() returns number of bytes 1752 * merge_bvec_fn() returns number of bytes
@@ -1765,13 +1754,13 @@ static int dm_merge_bvec(struct request_queue *q,
1765 * max is precomputed maximal io size 1754 * max is precomputed maximal io size
1766 */ 1755 */
1767 if (max_size && ti->type->merge) 1756 if (max_size && ti->type->merge)
1768 max_size = ti->type->merge(ti, bvm, biovec, (int) max_size); 1757 max_size = ti->type->merge(ti, bvm, biovec, max_size);
1769 /* 1758 /*
1770 * If the target doesn't support merge method and some of the devices 1759 * If the target doesn't support merge method and some of the devices
1771 * provided their merge_bvec method (we know this by looking for the 1760 * provided their merge_bvec method (we know this by looking at
1772 * max_hw_sectors that dm_set_device_limits may set), then we can't 1761 * queue_max_hw_sectors), then we can't allow bios with multiple vector
1773 * allow bios with multiple vector entries. So always set max_size 1762 * entries. So always set max_size to 0, and the code below allows
1774 * to 0, and the code below allows just one page. 1763 * just one page.
1775 */ 1764 */
1776 else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9) 1765 else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9)
1777 max_size = 0; 1766 max_size = 0;
@@ -2281,8 +2270,6 @@ static void dm_init_old_md_queue(struct mapped_device *md)
2281 2270
2282static void cleanup_mapped_device(struct mapped_device *md) 2271static void cleanup_mapped_device(struct mapped_device *md)
2283{ 2272{
2284 cleanup_srcu_struct(&md->io_barrier);
2285
2286 if (md->wq) 2273 if (md->wq)
2287 destroy_workqueue(md->wq); 2274 destroy_workqueue(md->wq);
2288 if (md->kworker_task) 2275 if (md->kworker_task)
@@ -2294,6 +2281,8 @@ static void cleanup_mapped_device(struct mapped_device *md)
2294 if (md->bs) 2281 if (md->bs)
2295 bioset_free(md->bs); 2282 bioset_free(md->bs);
2296 2283
2284 cleanup_srcu_struct(&md->io_barrier);
2285
2297 if (md->disk) { 2286 if (md->disk) {
2298 spin_lock(&_minor_lock); 2287 spin_lock(&_minor_lock);
2299 md->disk->private_data = NULL; 2288 md->disk->private_data = NULL;
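
In the dm_merge_bvec() hunk above, max_size changes from sector_t to int: once max_sectors is capped at BIO_MAX_SECTORS the byte budget fits in an int, and a negative result (bvm->bi_size already larger than the budget) is clamped to zero instead of wrapping as a huge unsigned value. A small stand-alone sketch of that clamp, assuming the caller has already capped max_sectors as the patch does (names are illustrative):

	#include <stdio.h>

	#define SECTOR_SHIFT 9

	static int bytes_left(unsigned int max_sectors, unsigned int bi_size)
	{
		/* signed arithmetic so an oversized bi_size goes negative... */
		int max_size = (int)(max_sectors << SECTOR_SHIFT) - (int)bi_size;

		/* ...and is clamped rather than wrapping around */
		if (max_size < 0)
			max_size = 0;
		return max_size;
	}

	int main(void)
	{
		printf("room left: %d\n", bytes_left(8, 2048));	/* 2048 */
		printf("clamped:   %d\n", bytes_left(1, 4096));	/* 0 */
		return 0;
	}
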
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index fcfc4b9b2672..0072190515e0 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -44,6 +44,7 @@ struct resync_info {
44 44
45/* md_cluster_info flags */ 45/* md_cluster_info flags */
46#define MD_CLUSTER_WAITING_FOR_NEWDISK 1 46#define MD_CLUSTER_WAITING_FOR_NEWDISK 1
47#define MD_CLUSTER_SUSPEND_READ_BALANCING 2
47 48
48 49
49struct md_cluster_info { 50struct md_cluster_info {
@@ -275,6 +276,9 @@ clear_bit:
275 276
276static void recover_prep(void *arg) 277static void recover_prep(void *arg)
277{ 278{
279 struct mddev *mddev = arg;
280 struct md_cluster_info *cinfo = mddev->cluster_info;
281 set_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state);
278} 282}
279 283
280static void recover_slot(void *arg, struct dlm_slot *slot) 284static void recover_slot(void *arg, struct dlm_slot *slot)
@@ -307,6 +311,7 @@ static void recover_done(void *arg, struct dlm_slot *slots,
307 311
308 cinfo->slot_number = our_slot; 312 cinfo->slot_number = our_slot;
309 complete(&cinfo->completion); 313 complete(&cinfo->completion);
314 clear_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state);
310} 315}
311 316
312static const struct dlm_lockspace_ops md_ls_ops = { 317static const struct dlm_lockspace_ops md_ls_ops = {
@@ -816,12 +821,17 @@ static void resync_finish(struct mddev *mddev)
816 resync_send(mddev, RESYNCING, 0, 0); 821 resync_send(mddev, RESYNCING, 0, 0);
817} 822}
818 823
819static int area_resyncing(struct mddev *mddev, sector_t lo, sector_t hi) 824static int area_resyncing(struct mddev *mddev, int direction,
825 sector_t lo, sector_t hi)
820{ 826{
821 struct md_cluster_info *cinfo = mddev->cluster_info; 827 struct md_cluster_info *cinfo = mddev->cluster_info;
822 int ret = 0; 828 int ret = 0;
823 struct suspend_info *s; 829 struct suspend_info *s;
824 830
831 if ((direction == READ) &&
832 test_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state))
833 return 1;
834
825 spin_lock_irq(&cinfo->suspend_lock); 835 spin_lock_irq(&cinfo->suspend_lock);
826 if (list_empty(&cinfo->suspend_list)) 836 if (list_empty(&cinfo->suspend_list))
827 goto out; 837 goto out;
diff --git a/drivers/md/md-cluster.h b/drivers/md/md-cluster.h
index 6817ee00e053..00defe2badbc 100644
--- a/drivers/md/md-cluster.h
+++ b/drivers/md/md-cluster.h
@@ -18,7 +18,7 @@ struct md_cluster_operations {
18 int (*metadata_update_start)(struct mddev *mddev); 18 int (*metadata_update_start)(struct mddev *mddev);
19 int (*metadata_update_finish)(struct mddev *mddev); 19 int (*metadata_update_finish)(struct mddev *mddev);
20 int (*metadata_update_cancel)(struct mddev *mddev); 20 int (*metadata_update_cancel)(struct mddev *mddev);
21 int (*area_resyncing)(struct mddev *mddev, sector_t lo, sector_t hi); 21 int (*area_resyncing)(struct mddev *mddev, int direction, sector_t lo, sector_t hi);
22 int (*add_new_disk_start)(struct mddev *mddev, struct md_rdev *rdev); 22 int (*add_new_disk_start)(struct mddev *mddev, struct md_rdev *rdev);
23 int (*add_new_disk_finish)(struct mddev *mddev); 23 int (*add_new_disk_finish)(struct mddev *mddev);
24 int (*new_disk_ack)(struct mddev *mddev, bool ack); 24 int (*new_disk_ack)(struct mddev *mddev, bool ack);
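
The md-cluster hunks above add a direction argument to area_resyncing(): while the DLM is recovering a dead node's locks (MD_CLUSTER_SUSPEND_READ_BALANCING is set in recover_prep() and cleared in recover_done()), every read is reported as resyncing so raid1's read_balance() stops balancing, while writes keep consulting only the explicit suspend ranges. A toy, single-range version of the check (user-space C, illustrative names, not the md-cluster structures):

	#include <stdio.h>

	#define READ  0
	#define WRITE 1

	struct cluster {
		int suspend_read_balancing;		/* recovery in progress */
		unsigned long long suspend_lo, suspend_hi; /* one range, for brevity */
	};

	static int area_resyncing(const struct cluster *c, int direction,
				  unsigned long long lo, unsigned long long hi)
	{
		/* reads back off globally while lock recovery runs */
		if (direction == READ && c->suspend_read_balancing)
			return 1;
		/* otherwise only an overlap with a suspended range matters */
		return lo < c->suspend_hi && hi > c->suspend_lo;
	}

	int main(void)
	{
		struct cluster c = { 1, 1000, 2000 };

		printf("read 0-8:  %d\n", area_resyncing(&c, READ, 0, 8));  /* 1 */
		printf("write 0-8: %d\n", area_resyncing(&c, WRITE, 0, 8)); /* 0 */
		return 0;
	}
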
diff --git a/drivers/md/md.c b/drivers/md/md.c
index d429c30cd514..e25f00f0138a 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5382,6 +5382,8 @@ static void __md_stop(struct mddev *mddev)
5382{ 5382{
5383 struct md_personality *pers = mddev->pers; 5383 struct md_personality *pers = mddev->pers;
5384 mddev_detach(mddev); 5384 mddev_detach(mddev);
5385 /* Ensure ->event_work is done */
5386 flush_workqueue(md_misc_wq);
5385 spin_lock(&mddev->lock); 5387 spin_lock(&mddev->lock);
5386 mddev->ready = 0; 5388 mddev->ready = 0;
5387 mddev->pers = NULL; 5389 mddev->pers = NULL;
@@ -5757,7 +5759,7 @@ static int get_bitmap_file(struct mddev *mddev, void __user * arg)
5757 char *ptr; 5759 char *ptr;
5758 int err; 5760 int err;
5759 5761
5760 file = kmalloc(sizeof(*file), GFP_NOIO); 5762 file = kzalloc(sizeof(*file), GFP_NOIO);
5761 if (!file) 5763 if (!file)
5762 return -ENOMEM; 5764 return -ENOMEM;
5763 5765
@@ -7437,7 +7439,7 @@ int md_setup_cluster(struct mddev *mddev, int nodes)
7437 err = request_module("md-cluster"); 7439 err = request_module("md-cluster");
7438 if (err) { 7440 if (err) {
7439 pr_err("md-cluster module not found.\n"); 7441 pr_err("md-cluster module not found.\n");
7440 return err; 7442 return -ENOENT;
7441 } 7443 }
7442 7444
7443 spin_lock(&pers_lock); 7445 spin_lock(&pers_lock);
diff --git a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h
index bf2b80d5c470..8731b6ea026b 100644
--- a/drivers/md/persistent-data/dm-btree-internal.h
+++ b/drivers/md/persistent-data/dm-btree-internal.h
@@ -138,4 +138,10 @@ int lower_bound(struct btree_node *n, uint64_t key);
138 138
139extern struct dm_block_validator btree_node_validator; 139extern struct dm_block_validator btree_node_validator;
140 140
141/*
142 * Value type for upper levels of multi-level btrees.
143 */
144extern void init_le64_type(struct dm_transaction_manager *tm,
145 struct dm_btree_value_type *vt);
146
141#endif /* DM_BTREE_INTERNAL_H */ 147#endif /* DM_BTREE_INTERNAL_H */
diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
index e04cfd2d60ef..4222f774cf36 100644
--- a/drivers/md/persistent-data/dm-btree-remove.c
+++ b/drivers/md/persistent-data/dm-btree-remove.c
@@ -309,8 +309,8 @@ static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
309 309
310 if (s < 0 && nr_center < -s) { 310 if (s < 0 && nr_center < -s) {
311 /* not enough in central node */ 311 /* not enough in central node */
312 shift(left, center, nr_center); 312 shift(left, center, -nr_center);
313 s = nr_center - target; 313 s += nr_center;
314 shift(left, right, s); 314 shift(left, right, s);
315 nr_right += s; 315 nr_right += s;
316 } else 316 } else
@@ -323,7 +323,7 @@ static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
323 if (s > 0 && nr_center < s) { 323 if (s > 0 && nr_center < s) {
324 /* not enough in central node */ 324 /* not enough in central node */
325 shift(center, right, nr_center); 325 shift(center, right, nr_center);
326 s = target - nr_center; 326 s -= nr_center;
327 shift(left, right, s); 327 shift(left, right, s);
328 nr_left -= s; 328 nr_left -= s;
329 } else 329 } else
@@ -544,14 +544,6 @@ static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
544 return r; 544 return r;
545} 545}
546 546
547static struct dm_btree_value_type le64_type = {
548 .context = NULL,
549 .size = sizeof(__le64),
550 .inc = NULL,
551 .dec = NULL,
552 .equal = NULL
553};
554
555int dm_btree_remove(struct dm_btree_info *info, dm_block_t root, 547int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
556 uint64_t *keys, dm_block_t *new_root) 548 uint64_t *keys, dm_block_t *new_root)
557{ 549{
@@ -559,12 +551,14 @@ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
559 int index = 0, r = 0; 551 int index = 0, r = 0;
560 struct shadow_spine spine; 552 struct shadow_spine spine;
561 struct btree_node *n; 553 struct btree_node *n;
554 struct dm_btree_value_type le64_vt;
562 555
556 init_le64_type(info->tm, &le64_vt);
563 init_shadow_spine(&spine, info); 557 init_shadow_spine(&spine, info);
564 for (level = 0; level < info->levels; level++) { 558 for (level = 0; level < info->levels; level++) {
565 r = remove_raw(&spine, info, 559 r = remove_raw(&spine, info,
566 (level == last_level ? 560 (level == last_level ?
567 &info->value_type : &le64_type), 561 &info->value_type : &le64_vt),
568 root, keys[level], (unsigned *)&index); 562 root, keys[level], (unsigned *)&index);
569 if (r < 0) 563 if (r < 0)
570 break; 564 break;
@@ -654,11 +648,13 @@ static int remove_one(struct dm_btree_info *info, dm_block_t root,
654 int index = 0, r = 0; 648 int index = 0, r = 0;
655 struct shadow_spine spine; 649 struct shadow_spine spine;
656 struct btree_node *n; 650 struct btree_node *n;
651 struct dm_btree_value_type le64_vt;
657 uint64_t k; 652 uint64_t k;
658 653
654 init_le64_type(info->tm, &le64_vt);
659 init_shadow_spine(&spine, info); 655 init_shadow_spine(&spine, info);
660 for (level = 0; level < last_level; level++) { 656 for (level = 0; level < last_level; level++) {
661 r = remove_raw(&spine, info, &le64_type, 657 r = remove_raw(&spine, info, &le64_vt,
662 root, keys[level], (unsigned *) &index); 658 root, keys[level], (unsigned *) &index);
663 if (r < 0) 659 if (r < 0)
664 goto out; 660 goto out;
@@ -689,6 +685,7 @@ static int remove_one(struct dm_btree_info *info, dm_block_t root,
689 value_ptr(n, index)); 685 value_ptr(n, index));
690 686
691 delete_at(n, index); 687 delete_at(n, index);
688 keys[last_level] = k + 1ull;
692 689
693 } else 690 } else
694 r = -ENODATA; 691 r = -ENODATA;
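
The redistribute3() hunks above fix the shift amounts used when the central node cannot cover the deficit on its own: the whole centre is emptied in the correct direction and s is adjusted by nr_center instead of being recomputed. A count-only model of the corrected arithmetic (plain C, not kernel code; the real shift() moves btree entries, here it only moves counts):

	#include <assert.h>
	#include <stdio.h>

	/* move n entries from *a to *b (n < 0 moves -n the other way) */
	static void shift(int *a, int *b, int n)
	{
		*a -= n;
		*b += n;
	}

	static void redistribute3(int *left, int *center, int *right)
	{
		int nr_left = *left, nr_center = *center, nr_right = *right;
		int total = nr_left + nr_center + nr_right;
		int target = total / 3;
		int s;

		if (nr_left < nr_right) {
			s = nr_left - target;
			if (s < 0 && nr_center < -s) {
				/* not enough in the central node: empty it into
				 * left, then pull the remaining deficit from right */
				shift(left, center, -nr_center);
				s += nr_center;
				shift(left, right, s);
				nr_right += s;
			} else
				shift(left, center, s);
			shift(center, right, target - nr_right);
		} else {
			s = target - nr_right;
			if (s > 0 && nr_center < s) {
				/* not enough in the central node */
				shift(center, right, nr_center);
				s -= nr_center;
				shift(left, right, s);
				nr_left -= s;
			} else
				shift(center, right, s);
			shift(left, center, nr_left - target);
		}
		assert(*left + *center + *right == total);	/* nothing lost */
	}

	int main(void)
	{
		int l = 1, c = 1, r = 10;

		redistribute3(&l, &c, &r);
		printf("%d/%d/%d\n", l, c, r);	/* 4/4/4 with the corrected shifts */
		return 0;
	}

With the corrected shifts, 1/1/10 entries rebalance to 4/4/4 and the assert confirms no entries are gained or lost.
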
diff --git a/drivers/md/persistent-data/dm-btree-spine.c b/drivers/md/persistent-data/dm-btree-spine.c
index 1b5e13ec7f96..0dee514ba4c5 100644
--- a/drivers/md/persistent-data/dm-btree-spine.c
+++ b/drivers/md/persistent-data/dm-btree-spine.c
@@ -249,3 +249,40 @@ int shadow_root(struct shadow_spine *s)
249{ 249{
250 return s->root; 250 return s->root;
251} 251}
252
253static void le64_inc(void *context, const void *value_le)
254{
255 struct dm_transaction_manager *tm = context;
256 __le64 v_le;
257
258 memcpy(&v_le, value_le, sizeof(v_le));
259 dm_tm_inc(tm, le64_to_cpu(v_le));
260}
261
262static void le64_dec(void *context, const void *value_le)
263{
264 struct dm_transaction_manager *tm = context;
265 __le64 v_le;
266
267 memcpy(&v_le, value_le, sizeof(v_le));
268 dm_tm_dec(tm, le64_to_cpu(v_le));
269}
270
271static int le64_equal(void *context, const void *value1_le, const void *value2_le)
272{
273 __le64 v1_le, v2_le;
274
275 memcpy(&v1_le, value1_le, sizeof(v1_le));
276 memcpy(&v2_le, value2_le, sizeof(v2_le));
277 return v1_le == v2_le;
278}
279
280void init_le64_type(struct dm_transaction_manager *tm,
281 struct dm_btree_value_type *vt)
282{
283 vt->context = tm;
284 vt->size = sizeof(__le64);
285 vt->inc = le64_inc;
286 vt->dec = le64_dec;
287 vt->equal = le64_equal;
288}
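
init_le64_type() above gives the internal (upper-level) btree values real inc/dec/equal callbacks backed by the transaction manager, so copying or deleting an internal node adjusts the reference counts of the blocks it points at, instead of the old NULL callbacks that left them untracked. A generic sketch of that callback contract (simplified types and names, not the dm-btree API):

	#include <stdio.h>

	struct value_type {
		void *context;
		void (*inc)(void *context, const void *value);
		void (*dec)(void *context, const void *value);
	};

	static void block_inc(void *context, const void *value)
	{
		int *refcounts = context;
		refcounts[*(const unsigned long *)value]++;
	}

	static void block_dec(void *context, const void *value)
	{
		int *refcounts = context;
		refcounts[*(const unsigned long *)value]--;
	}

	/* the container calls ->inc() whenever it duplicates a value ... */
	static void copy_value(const struct value_type *vt, const void *value)
	{
		if (vt->inc)
			vt->inc(vt->context, value);
	}

	/* ... and ->dec() whenever it drops one */
	static void drop_value(const struct value_type *vt, const void *value)
	{
		if (vt->dec)
			vt->dec(vt->context, value);
	}

	int main(void)
	{
		int refcounts[4] = { 0, 1, 1, 1 };
		struct value_type vt = { refcounts, block_inc, block_dec };
		unsigned long block = 2;

		copy_value(&vt, &block);	/* e.g. shadowing during remove */
		printf("after copy: %d\n", refcounts[block]);	/* 2 */
		drop_value(&vt, &block);
		printf("after drop: %d\n", refcounts[block]);	/* 1 */
		return 0;
	}
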
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index 200ac12a1d40..c7726cebc495 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -255,7 +255,7 @@ int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
255 int r; 255 int r;
256 struct del_stack *s; 256 struct del_stack *s;
257 257
258 s = kmalloc(sizeof(*s), GFP_KERNEL); 258 s = kmalloc(sizeof(*s), GFP_NOIO);
259 if (!s) 259 if (!s)
260 return -ENOMEM; 260 return -ENOMEM;
261 s->info = info; 261 s->info = info;
@@ -667,12 +667,7 @@ static int insert(struct dm_btree_info *info, dm_block_t root,
667 struct btree_node *n; 667 struct btree_node *n;
668 struct dm_btree_value_type le64_type; 668 struct dm_btree_value_type le64_type;
669 669
670 le64_type.context = NULL; 670 init_le64_type(info->tm, &le64_type);
671 le64_type.size = sizeof(__le64);
672 le64_type.inc = NULL;
673 le64_type.dec = NULL;
674 le64_type.equal = NULL;
675
676 init_shadow_spine(&spine, info); 671 init_shadow_spine(&spine, info);
677 672
678 for (level = 0; level < (info->levels - 1); level++) { 673 for (level = 0; level < (info->levels - 1); level++) {
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index f80f1af61ce7..967a4ed73929 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -336,7 +336,7 @@ static void raid1_end_read_request(struct bio *bio, int error)
336 spin_lock_irqsave(&conf->device_lock, flags); 336 spin_lock_irqsave(&conf->device_lock, flags);
337 if (r1_bio->mddev->degraded == conf->raid_disks || 337 if (r1_bio->mddev->degraded == conf->raid_disks ||
338 (r1_bio->mddev->degraded == conf->raid_disks-1 && 338 (r1_bio->mddev->degraded == conf->raid_disks-1 &&
339 !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags))) 339 test_bit(In_sync, &conf->mirrors[mirror].rdev->flags)))
340 uptodate = 1; 340 uptodate = 1;
341 spin_unlock_irqrestore(&conf->device_lock, flags); 341 spin_unlock_irqrestore(&conf->device_lock, flags);
342 } 342 }
@@ -541,7 +541,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
541 541
542 if ((conf->mddev->recovery_cp < this_sector + sectors) || 542 if ((conf->mddev->recovery_cp < this_sector + sectors) ||
543 (mddev_is_clustered(conf->mddev) && 543 (mddev_is_clustered(conf->mddev) &&
544 md_cluster_ops->area_resyncing(conf->mddev, this_sector, 544 md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector,
545 this_sector + sectors))) 545 this_sector + sectors)))
546 choose_first = 1; 546 choose_first = 1;
547 else 547 else
@@ -1111,7 +1111,8 @@ static void make_request(struct mddev *mddev, struct bio * bio)
1111 ((bio_end_sector(bio) > mddev->suspend_lo && 1111 ((bio_end_sector(bio) > mddev->suspend_lo &&
1112 bio->bi_iter.bi_sector < mddev->suspend_hi) || 1112 bio->bi_iter.bi_sector < mddev->suspend_hi) ||
1113 (mddev_is_clustered(mddev) && 1113 (mddev_is_clustered(mddev) &&
1114 md_cluster_ops->area_resyncing(mddev, bio->bi_iter.bi_sector, bio_end_sector(bio))))) { 1114 md_cluster_ops->area_resyncing(mddev, WRITE,
1115 bio->bi_iter.bi_sector, bio_end_sector(bio))))) {
1115 /* As the suspend_* range is controlled by 1116 /* As the suspend_* range is controlled by
1116 * userspace, we want an interruptible 1117 * userspace, we want an interruptible
1117 * wait. 1118 * wait.
@@ -1124,7 +1125,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
1124 if (bio_end_sector(bio) <= mddev->suspend_lo || 1125 if (bio_end_sector(bio) <= mddev->suspend_lo ||
1125 bio->bi_iter.bi_sector >= mddev->suspend_hi || 1126 bio->bi_iter.bi_sector >= mddev->suspend_hi ||
1126 (mddev_is_clustered(mddev) && 1127 (mddev_is_clustered(mddev) &&
1127 !md_cluster_ops->area_resyncing(mddev, 1128 !md_cluster_ops->area_resyncing(mddev, WRITE,
1128 bio->bi_iter.bi_sector, bio_end_sector(bio)))) 1129 bio->bi_iter.bi_sector, bio_end_sector(bio))))
1129 break; 1130 break;
1130 schedule(); 1131 schedule();
@@ -1475,6 +1476,7 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
1475{ 1476{
1476 char b[BDEVNAME_SIZE]; 1477 char b[BDEVNAME_SIZE];
1477 struct r1conf *conf = mddev->private; 1478 struct r1conf *conf = mddev->private;
1479 unsigned long flags;
1478 1480
1479 /* 1481 /*
1480 * If it is not operational, then we have already marked it as dead 1482 * If it is not operational, then we have already marked it as dead
@@ -1494,14 +1496,13 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
1494 return; 1496 return;
1495 } 1497 }
1496 set_bit(Blocked, &rdev->flags); 1498 set_bit(Blocked, &rdev->flags);
1499 spin_lock_irqsave(&conf->device_lock, flags);
1497 if (test_and_clear_bit(In_sync, &rdev->flags)) { 1500 if (test_and_clear_bit(In_sync, &rdev->flags)) {
1498 unsigned long flags;
1499 spin_lock_irqsave(&conf->device_lock, flags);
1500 mddev->degraded++; 1501 mddev->degraded++;
1501 set_bit(Faulty, &rdev->flags); 1502 set_bit(Faulty, &rdev->flags);
1502 spin_unlock_irqrestore(&conf->device_lock, flags);
1503 } else 1503 } else
1504 set_bit(Faulty, &rdev->flags); 1504 set_bit(Faulty, &rdev->flags);
1505 spin_unlock_irqrestore(&conf->device_lock, flags);
1505 /* 1506 /*
1506 * if recovery is running, make sure it aborts. 1507 * if recovery is running, make sure it aborts.
1507 */ 1508 */
@@ -1567,7 +1568,10 @@ static int raid1_spare_active(struct mddev *mddev)
1567 * Find all failed disks within the RAID1 configuration 1568 * Find all failed disks within the RAID1 configuration
1568 * and mark them readable. 1569 * and mark them readable.
1569 * Called under mddev lock, so rcu protection not needed. 1570 * Called under mddev lock, so rcu protection not needed.
1571 * device_lock used to avoid races with raid1_end_read_request
1572 * which expects 'In_sync' flags and ->degraded to be consistent.
1570 */ 1573 */
1574 spin_lock_irqsave(&conf->device_lock, flags);
1571 for (i = 0; i < conf->raid_disks; i++) { 1575 for (i = 0; i < conf->raid_disks; i++) {
1572 struct md_rdev *rdev = conf->mirrors[i].rdev; 1576 struct md_rdev *rdev = conf->mirrors[i].rdev;
1573 struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev; 1577 struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
@@ -1598,7 +1602,6 @@ static int raid1_spare_active(struct mddev *mddev)
1598 sysfs_notify_dirent_safe(rdev->sysfs_state); 1602 sysfs_notify_dirent_safe(rdev->sysfs_state);
1599 } 1603 }
1600 } 1604 }
1601 spin_lock_irqsave(&conf->device_lock, flags);
1602 mddev->degraded -= count; 1605 mddev->degraded -= count;
1603 spin_unlock_irqrestore(&conf->device_lock, flags); 1606 spin_unlock_irqrestore(&conf->device_lock, flags);
1604 1607
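
The raid1 hunks above widen device_lock so that clearing In_sync, setting Faulty and bumping mddev->degraded happen as one unit, and raid1_end_read_request() now tests In_sync rather than !Faulty. A toy pthread analogue of why writer and reader must use the same lock (flag bits and names are illustrative, not the md structures):

	#include <pthread.h>
	#include <stdio.h>

	#define IN_SYNC (1 << 0)
	#define FAULTY  (1 << 1)

	struct conf {
		pthread_mutex_t device_lock;
		int degraded;
		unsigned int flags[2];
	};

	/* writer: flags and degraded change together under one lock */
	static void mark_faulty(struct conf *conf, int disk)
	{
		pthread_mutex_lock(&conf->device_lock);
		if (conf->flags[disk] & IN_SYNC) {
			conf->flags[disk] &= ~IN_SYNC;
			conf->degraded++;
		}
		conf->flags[disk] |= FAULTY;
		pthread_mutex_unlock(&conf->device_lock);
	}

	/* reader: the check only means something if it sees a consistent pair */
	static int last_working_disk(struct conf *conf, int disk, int raid_disks)
	{
		int ret;

		pthread_mutex_lock(&conf->device_lock);
		ret = conf->degraded == raid_disks ||
		      (conf->degraded == raid_disks - 1 &&
		       (conf->flags[disk] & IN_SYNC));
		pthread_mutex_unlock(&conf->device_lock);
		return ret;
	}

	int main(void)
	{
		struct conf conf = { PTHREAD_MUTEX_INITIALIZER, 0,
				     { IN_SYNC, IN_SYNC } };

		mark_faulty(&conf, 1);
		printf("disk 0 last working: %d\n", last_working_disk(&conf, 0, 2));
		return 0;
	}
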
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 940f2f365461..38c58e19cfce 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -3556,6 +3556,7 @@ static struct r10conf *setup_conf(struct mddev *mddev)
3556 /* far_copies must be 1 */ 3556 /* far_copies must be 1 */
3557 conf->prev.stride = conf->dev_sectors; 3557 conf->prev.stride = conf->dev_sectors;
3558 } 3558 }
3559 conf->reshape_safe = conf->reshape_progress;
3559 spin_lock_init(&conf->device_lock); 3560 spin_lock_init(&conf->device_lock);
3560 INIT_LIST_HEAD(&conf->retry_list); 3561 INIT_LIST_HEAD(&conf->retry_list);
3561 3562
@@ -3760,7 +3761,6 @@ static int run(struct mddev *mddev)
3760 } 3761 }
3761 conf->offset_diff = min_offset_diff; 3762 conf->offset_diff = min_offset_diff;
3762 3763
3763 conf->reshape_safe = conf->reshape_progress;
3764 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 3764 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
3765 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 3765 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
3766 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 3766 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
@@ -4103,6 +4103,7 @@ static int raid10_start_reshape(struct mddev *mddev)
4103 conf->reshape_progress = size; 4103 conf->reshape_progress = size;
4104 } else 4104 } else
4105 conf->reshape_progress = 0; 4105 conf->reshape_progress = 0;
4106 conf->reshape_safe = conf->reshape_progress;
4106 spin_unlock_irq(&conf->device_lock); 4107 spin_unlock_irq(&conf->device_lock);
4107 4108
4108 if (mddev->delta_disks && mddev->bitmap) { 4109 if (mddev->delta_disks && mddev->bitmap) {
@@ -4170,6 +4171,7 @@ abort:
4170 rdev->new_data_offset = rdev->data_offset; 4171 rdev->new_data_offset = rdev->data_offset;
4171 smp_wmb(); 4172 smp_wmb();
4172 conf->reshape_progress = MaxSector; 4173 conf->reshape_progress = MaxSector;
4174 conf->reshape_safe = MaxSector;
4173 mddev->reshape_position = MaxSector; 4175 mddev->reshape_position = MaxSector;
4174 spin_unlock_irq(&conf->device_lock); 4176 spin_unlock_irq(&conf->device_lock);
4175 return ret; 4177 return ret;
@@ -4524,6 +4526,7 @@ static void end_reshape(struct r10conf *conf)
4524 md_finish_reshape(conf->mddev); 4526 md_finish_reshape(conf->mddev);
4525 smp_wmb(); 4527 smp_wmb();
4526 conf->reshape_progress = MaxSector; 4528 conf->reshape_progress = MaxSector;
4529 conf->reshape_safe = MaxSector;
4527 spin_unlock_irq(&conf->device_lock); 4530 spin_unlock_irq(&conf->device_lock);
4528 4531
4529 /* read-ahead size must cover two whole stripes, which is 4532 /* read-ahead size must cover two whole stripes, which is
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 59e44e99eef3..f757023fc458 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2162,6 +2162,9 @@ static int resize_stripes(struct r5conf *conf, int newsize)
2162 if (!sc) 2162 if (!sc)
2163 return -ENOMEM; 2163 return -ENOMEM;
2164 2164
2165 /* Need to ensure auto-resizing doesn't interfere */
2166 mutex_lock(&conf->cache_size_mutex);
2167
2165 for (i = conf->max_nr_stripes; i; i--) { 2168 for (i = conf->max_nr_stripes; i; i--) {
2166 nsh = alloc_stripe(sc, GFP_KERNEL); 2169 nsh = alloc_stripe(sc, GFP_KERNEL);
2167 if (!nsh) 2170 if (!nsh)
@@ -2178,6 +2181,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
2178 kmem_cache_free(sc, nsh); 2181 kmem_cache_free(sc, nsh);
2179 } 2182 }
2180 kmem_cache_destroy(sc); 2183 kmem_cache_destroy(sc);
2184 mutex_unlock(&conf->cache_size_mutex);
2181 return -ENOMEM; 2185 return -ENOMEM;
2182 } 2186 }
2183 /* Step 2 - Must use GFP_NOIO now. 2187 /* Step 2 - Must use GFP_NOIO now.
@@ -2224,6 +2228,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
2224 } else 2228 } else
2225 err = -ENOMEM; 2229 err = -ENOMEM;
2226 2230
2231 mutex_unlock(&conf->cache_size_mutex);
2227 /* Step 4, return new stripes to service */ 2232 /* Step 4, return new stripes to service */
2228 while(!list_empty(&newstripes)) { 2233 while(!list_empty(&newstripes)) {
2229 nsh = list_entry(newstripes.next, struct stripe_head, lru); 2234 nsh = list_entry(newstripes.next, struct stripe_head, lru);
@@ -2251,7 +2256,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
2251static int drop_one_stripe(struct r5conf *conf) 2256static int drop_one_stripe(struct r5conf *conf)
2252{ 2257{
2253 struct stripe_head *sh; 2258 struct stripe_head *sh;
2254 int hash = (conf->max_nr_stripes - 1) % NR_STRIPE_HASH_LOCKS; 2259 int hash = (conf->max_nr_stripes - 1) & STRIPE_HASH_LOCKS_MASK;
2255 2260
2256 spin_lock_irq(conf->hash_locks + hash); 2261 spin_lock_irq(conf->hash_locks + hash);
2257 sh = get_free_stripe(conf, hash); 2262 sh = get_free_stripe(conf, hash);
@@ -4061,8 +4066,10 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
4061 &first_bad, &bad_sectors)) 4066 &first_bad, &bad_sectors))
4062 set_bit(R5_ReadRepl, &dev->flags); 4067 set_bit(R5_ReadRepl, &dev->flags);
4063 else { 4068 else {
4064 if (rdev) 4069 if (rdev && !test_bit(Faulty, &rdev->flags))
4065 set_bit(R5_NeedReplace, &dev->flags); 4070 set_bit(R5_NeedReplace, &dev->flags);
4071 else
4072 clear_bit(R5_NeedReplace, &dev->flags);
4066 rdev = rcu_dereference(conf->disks[i].rdev); 4073 rdev = rcu_dereference(conf->disks[i].rdev);
4067 clear_bit(R5_ReadRepl, &dev->flags); 4074 clear_bit(R5_ReadRepl, &dev->flags);
4068 } 4075 }
@@ -5857,12 +5864,14 @@ static void raid5d(struct md_thread *thread)
5857 pr_debug("%d stripes handled\n", handled); 5864 pr_debug("%d stripes handled\n", handled);
5858 5865
5859 spin_unlock_irq(&conf->device_lock); 5866 spin_unlock_irq(&conf->device_lock);
5860 if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state)) { 5867 if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state) &&
5868 mutex_trylock(&conf->cache_size_mutex)) {
5861 grow_one_stripe(conf, __GFP_NOWARN); 5869 grow_one_stripe(conf, __GFP_NOWARN);
5862 /* Set flag even if allocation failed. This helps 5870 /* Set flag even if allocation failed. This helps
5863 * slow down allocation requests when mem is short 5871 * slow down allocation requests when mem is short
5864 */ 5872 */
5865 set_bit(R5_DID_ALLOC, &conf->cache_state); 5873 set_bit(R5_DID_ALLOC, &conf->cache_state);
5874 mutex_unlock(&conf->cache_size_mutex);
5866 } 5875 }
5867 5876
5868 async_tx_issue_pending_all(); 5877 async_tx_issue_pending_all();
@@ -5894,18 +5903,22 @@ raid5_set_cache_size(struct mddev *mddev, int size)
5894 return -EINVAL; 5903 return -EINVAL;
5895 5904
5896 conf->min_nr_stripes = size; 5905 conf->min_nr_stripes = size;
5906 mutex_lock(&conf->cache_size_mutex);
5897 while (size < conf->max_nr_stripes && 5907 while (size < conf->max_nr_stripes &&
5898 drop_one_stripe(conf)) 5908 drop_one_stripe(conf))
5899 ; 5909 ;
5910 mutex_unlock(&conf->cache_size_mutex);
5900 5911
5901 5912
5902 err = md_allow_write(mddev); 5913 err = md_allow_write(mddev);
5903 if (err) 5914 if (err)
5904 return err; 5915 return err;
5905 5916
5917 mutex_lock(&conf->cache_size_mutex);
5906 while (size > conf->max_nr_stripes) 5918 while (size > conf->max_nr_stripes)
5907 if (!grow_one_stripe(conf, GFP_KERNEL)) 5919 if (!grow_one_stripe(conf, GFP_KERNEL))
5908 break; 5920 break;
5921 mutex_unlock(&conf->cache_size_mutex);
5909 5922
5910 return 0; 5923 return 0;
5911} 5924}
@@ -6371,11 +6384,19 @@ static unsigned long raid5_cache_scan(struct shrinker *shrink,
6371 struct shrink_control *sc) 6384 struct shrink_control *sc)
6372{ 6385{
6373 struct r5conf *conf = container_of(shrink, struct r5conf, shrinker); 6386 struct r5conf *conf = container_of(shrink, struct r5conf, shrinker);
6374 int ret = 0; 6387 unsigned long ret = SHRINK_STOP;
6375 while (ret < sc->nr_to_scan) { 6388
6376 if (drop_one_stripe(conf) == 0) 6389 if (mutex_trylock(&conf->cache_size_mutex)) {
6377 return SHRINK_STOP; 6390 ret= 0;
6378 ret++; 6391 while (ret < sc->nr_to_scan &&
6392 conf->max_nr_stripes > conf->min_nr_stripes) {
6393 if (drop_one_stripe(conf) == 0) {
6394 ret = SHRINK_STOP;
6395 break;
6396 }
6397 ret++;
6398 }
6399 mutex_unlock(&conf->cache_size_mutex);
6379 } 6400 }
6380 return ret; 6401 return ret;
6381} 6402}
@@ -6444,6 +6465,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
6444 goto abort; 6465 goto abort;
6445 spin_lock_init(&conf->device_lock); 6466 spin_lock_init(&conf->device_lock);
6446 seqcount_init(&conf->gen_lock); 6467 seqcount_init(&conf->gen_lock);
6468 mutex_init(&conf->cache_size_mutex);
6447 init_waitqueue_head(&conf->wait_for_quiescent); 6469 init_waitqueue_head(&conf->wait_for_quiescent);
6448 for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) { 6470 for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) {
6449 init_waitqueue_head(&conf->wait_for_stripe[i]); 6471 init_waitqueue_head(&conf->wait_for_stripe[i]);
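
raid5_cache_scan() above now takes cache_size_mutex with a trylock, returns SHRINK_STOP when it cannot get it, and never shrinks below min_nr_stripes; blocking on the mutex from reclaim could deadlock against resize_stripes()/raid5_set_cache_size(), which allocate memory while holding it. A user-space sketch of the same pattern (pthreads stand in for the kernel mutex; the names are illustrative):

	#include <pthread.h>
	#include <stdio.h>

	#define SHRINK_STOP (~0UL)

	struct cache {
		pthread_mutex_t size_mutex;	/* held by the resize path too */
		unsigned long max_nr;
		unsigned long min_nr;
	};

	static int drop_one(struct cache *c)
	{
		if (c->max_nr == 0)
			return 0;
		c->max_nr--;
		return 1;
	}

	/* never block on the mutex from reclaim: a trylock failure means a
	 * resize is in flight, so tell the core to stop instead */
	static unsigned long cache_scan(struct cache *c, unsigned long nr_to_scan)
	{
		unsigned long ret = SHRINK_STOP;

		if (pthread_mutex_trylock(&c->size_mutex) == 0) {
			ret = 0;
			while (ret < nr_to_scan && c->max_nr > c->min_nr) {
				if (!drop_one(c)) {
					ret = SHRINK_STOP;
					break;
				}
				ret++;
			}
			pthread_mutex_unlock(&c->size_mutex);
		}
		return ret;
	}

	int main(void)
	{
		struct cache c = { PTHREAD_MUTEX_INITIALIZER, 256, 64 };
		unsigned long freed = cache_scan(&c, 16);

		printf("freed %lu, max_nr now %lu\n", freed, c.max_nr);
		return 0;
	}
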
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 02c3bf8fbfe7..d05144278690 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -482,7 +482,8 @@ struct r5conf {
482 */ 482 */
483 int active_name; 483 int active_name;
484 char cache_name[2][32]; 484 char cache_name[2][32];
485 struct kmem_cache *slab_cache; /* for allocating stripes */ 485 struct kmem_cache *slab_cache; /* for allocating stripes */
486 struct mutex cache_size_mutex; /* Protect changes to cache size */
486 487
487 int seq_flush, seq_write; 488 int seq_flush, seq_write;
488 int quiesce; 489 int quiesce;
diff --git a/drivers/media/dvb-frontends/Kconfig b/drivers/media/dvb-frontends/Kconfig
index 0d35f5850ff1..5ab90f36a6a6 100644
--- a/drivers/media/dvb-frontends/Kconfig
+++ b/drivers/media/dvb-frontends/Kconfig
@@ -240,7 +240,7 @@ config DVB_SI21XX
240 240
241config DVB_TS2020 241config DVB_TS2020
242 tristate "Montage Tehnology TS2020 based tuners" 242 tristate "Montage Tehnology TS2020 based tuners"
243 depends on DVB_CORE 243 depends on DVB_CORE && I2C
244 select REGMAP_I2C 244 select REGMAP_I2C
245 default m if !MEDIA_SUBDRV_AUTOSELECT 245 default m if !MEDIA_SUBDRV_AUTOSELECT
246 help 246 help
diff --git a/drivers/media/pci/cobalt/Kconfig b/drivers/media/pci/cobalt/Kconfig
index 3be1b2c3c386..6a1c0089bb62 100644
--- a/drivers/media/pci/cobalt/Kconfig
+++ b/drivers/media/pci/cobalt/Kconfig
@@ -2,6 +2,7 @@ config VIDEO_COBALT
2 tristate "Cisco Cobalt support" 2 tristate "Cisco Cobalt support"
3 depends on VIDEO_V4L2 && I2C && MEDIA_CONTROLLER 3 depends on VIDEO_V4L2 && I2C && MEDIA_CONTROLLER
4 depends on PCI_MSI && MTD_COMPLEX_MAPPINGS && GPIOLIB 4 depends on PCI_MSI && MTD_COMPLEX_MAPPINGS && GPIOLIB
5 depends on SND
5 select I2C_ALGOBIT 6 select I2C_ALGOBIT
6 select VIDEO_ADV7604 7 select VIDEO_ADV7604
7 select VIDEO_ADV7511 8 select VIDEO_ADV7511
diff --git a/drivers/media/pci/cobalt/cobalt-irq.c b/drivers/media/pci/cobalt/cobalt-irq.c
index dd4bff9cf339..d1f5898d11ba 100644
--- a/drivers/media/pci/cobalt/cobalt-irq.c
+++ b/drivers/media/pci/cobalt/cobalt-irq.c
@@ -139,7 +139,7 @@ done:
139 also know about dropped frames. */ 139 also know about dropped frames. */
140 cb->vb.v4l2_buf.sequence = s->sequence++; 140 cb->vb.v4l2_buf.sequence = s->sequence++;
141 vb2_buffer_done(&cb->vb, (skip || s->unstable_frame) ? 141 vb2_buffer_done(&cb->vb, (skip || s->unstable_frame) ?
142 VB2_BUF_STATE_QUEUED : VB2_BUF_STATE_DONE); 142 VB2_BUF_STATE_REQUEUEING : VB2_BUF_STATE_DONE);
143} 143}
144 144
145irqreturn_t cobalt_irq_handler(int irq, void *dev_id) 145irqreturn_t cobalt_irq_handler(int irq, void *dev_id)
diff --git a/drivers/media/pci/ivtv/ivtvfb.c b/drivers/media/pci/ivtv/ivtvfb.c
index 4cb365d4ffdc..8b95eefb610b 100644
--- a/drivers/media/pci/ivtv/ivtvfb.c
+++ b/drivers/media/pci/ivtv/ivtvfb.c
@@ -38,6 +38,8 @@
38 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 38 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
39 */ 39 */
40 40
41#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
42
41#include <linux/module.h> 43#include <linux/module.h>
42#include <linux/kernel.h> 44#include <linux/kernel.h>
43#include <linux/fb.h> 45#include <linux/fb.h>
@@ -1171,6 +1173,13 @@ static int ivtvfb_init_card(struct ivtv *itv)
1171{ 1173{
1172 int rc; 1174 int rc;
1173 1175
1176#ifdef CONFIG_X86_64
1177 if (pat_enabled()) {
1178 pr_warn("ivtvfb needs PAT disabled, boot with nopat kernel parameter\n");
1179 return -ENODEV;
1180 }
1181#endif
1182
1174 if (itv->osd_info) { 1183 if (itv->osd_info) {
1175 IVTVFB_ERR("Card %d already initialised\n", ivtvfb_card_id); 1184 IVTVFB_ERR("Card %d already initialised\n", ivtvfb_card_id);
1176 return -EBUSY; 1185 return -EBUSY;
@@ -1265,12 +1274,6 @@ static int __init ivtvfb_init(void)
1265 int registered = 0; 1274 int registered = 0;
1266 int err; 1275 int err;
1267 1276
1268#ifdef CONFIG_X86_64
1269 if (WARN(pat_enabled(),
1270 "ivtvfb needs PAT disabled, boot with nopat kernel parameter\n")) {
1271 return -ENODEV;
1272 }
1273#endif
1274 1277
1275 if (ivtvfb_card_id < -1 || ivtvfb_card_id >= IVTV_MAX_CARDS) { 1278 if (ivtvfb_card_id < -1 || ivtvfb_card_id >= IVTV_MAX_CARDS) {
1276 printk(KERN_ERR "ivtvfb: ivtvfb_card_id parameter is out of range (valid range: -1 - %d)\n", 1279 printk(KERN_ERR "ivtvfb: ivtvfb_card_id parameter is out of range (valid range: -1 - %d)\n",
diff --git a/drivers/media/pci/mantis/mantis_dma.c b/drivers/media/pci/mantis/mantis_dma.c
index 1d59c7e039f7..87990ece5848 100644
--- a/drivers/media/pci/mantis/mantis_dma.c
+++ b/drivers/media/pci/mantis/mantis_dma.c
@@ -130,10 +130,11 @@ err:
130 130
131int mantis_dma_init(struct mantis_pci *mantis) 131int mantis_dma_init(struct mantis_pci *mantis)
132{ 132{
133 int err = 0; 133 int err;
134 134
135 dprintk(MANTIS_DEBUG, 1, "Mantis DMA init"); 135 dprintk(MANTIS_DEBUG, 1, "Mantis DMA init");
136 if (mantis_alloc_buffers(mantis) < 0) { 136 err = mantis_alloc_buffers(mantis);
137 if (err < 0) {
137 dprintk(MANTIS_ERROR, 1, "Error allocating DMA buffer"); 138 dprintk(MANTIS_ERROR, 1, "Error allocating DMA buffer");
138 139
139 /* Stop RISC Engine */ 140 /* Stop RISC Engine */
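
The mantis_dma_init() hunk above finally captures the return value of mantis_alloc_buffers(); previously err stayed 0 and the cleanup branch tested a value the call never set. A tiny stand-alone illustration of the corrected pattern (hypothetical names):

	#include <stdio.h>

	static int alloc_buffers(int fail)
	{
		return fail ? -12 /* -ENOMEM */ : 0;
	}

	static int dma_init(int fail)
	{
		int err;

		err = alloc_buffers(fail);	/* capture the result... */
		if (err < 0) {			/* ...and test it, not a stale 0 */
			fprintf(stderr, "Error allocating DMA buffer\n");
			return err;
		}
		return 0;
	}

	int main(void)
	{
		printf("ok path:      %d\n", dma_init(0));
		printf("failure path: %d\n", dma_init(1));
		return 0;
	}
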
diff --git a/drivers/media/rc/ir-rc5-decoder.c b/drivers/media/rc/ir-rc5-decoder.c
index 8939ebd74391..84fa6e9b59a1 100644
--- a/drivers/media/rc/ir-rc5-decoder.c
+++ b/drivers/media/rc/ir-rc5-decoder.c
@@ -184,125 +184,9 @@ out:
184 return -EINVAL; 184 return -EINVAL;
185} 185}
186 186
187static struct ir_raw_timings_manchester ir_rc5_timings = {
188 .leader = RC5_UNIT,
189 .pulse_space_start = 0,
190 .clock = RC5_UNIT,
191 .trailer_space = RC5_UNIT * 10,
192};
193
194static struct ir_raw_timings_manchester ir_rc5x_timings[2] = {
195 {
196 .leader = RC5_UNIT,
197 .pulse_space_start = 0,
198 .clock = RC5_UNIT,
199 .trailer_space = RC5X_SPACE,
200 },
201 {
202 .clock = RC5_UNIT,
203 .trailer_space = RC5_UNIT * 10,
204 },
205};
206
207static struct ir_raw_timings_manchester ir_rc5_sz_timings = {
208 .leader = RC5_UNIT,
209 .pulse_space_start = 0,
210 .clock = RC5_UNIT,
211 .trailer_space = RC5_UNIT * 10,
212};
213
214static int ir_rc5_validate_filter(const struct rc_scancode_filter *scancode,
215 unsigned int important_bits)
216{
217 /* all important bits of scancode should be set in mask */
218 if (~scancode->mask & important_bits)
219 return -EINVAL;
220 /* extra bits in mask should be zero in data */
221 if (scancode->mask & scancode->data & ~important_bits)
222 return -EINVAL;
223 return 0;
224}
225
226/**
227 * ir_rc5_encode() - Encode a scancode as a stream of raw events
228 *
229 * @protocols: allowed protocols
230 * @scancode: scancode filter describing scancode (helps distinguish between
231 * protocol subtypes when scancode is ambiguous)
232 * @events: array of raw ir events to write into
233 * @max: maximum size of @events
234 *
235 * Returns: The number of events written.
236 * -ENOBUFS if there isn't enough space in the array to fit the
237 * encoding. In this case all @max events will have been written.
238 * -EINVAL if the scancode is ambiguous or invalid.
239 */
240static int ir_rc5_encode(u64 protocols,
241 const struct rc_scancode_filter *scancode,
242 struct ir_raw_event *events, unsigned int max)
243{
244 int ret;
245 struct ir_raw_event *e = events;
246 unsigned int data, xdata, command, commandx, system;
247
248 /* Detect protocol and convert scancode to raw data */
249 if (protocols & RC_BIT_RC5 &&
250 !ir_rc5_validate_filter(scancode, 0x1f7f)) {
251 /* decode scancode */
252 command = (scancode->data & 0x003f) >> 0;
253 commandx = (scancode->data & 0x0040) >> 6;
254 system = (scancode->data & 0x1f00) >> 8;
255 /* encode data */
256 data = !commandx << 12 | system << 6 | command;
257
258 /* Modulate the data */
259 ret = ir_raw_gen_manchester(&e, max, &ir_rc5_timings, RC5_NBITS,
260 data);
261 if (ret < 0)
262 return ret;
263 } else if (protocols & RC_BIT_RC5X &&
264 !ir_rc5_validate_filter(scancode, 0x1f7f3f)) {
265 /* decode scancode */
266 xdata = (scancode->data & 0x00003f) >> 0;
267 command = (scancode->data & 0x003f00) >> 8;
268 commandx = (scancode->data & 0x004000) >> 14;
269 system = (scancode->data & 0x1f0000) >> 16;
270 /* commandx and system overlap, bits must match when encoded */
271 if (commandx == (system & 0x1))
272 return -EINVAL;
273 /* encode data */
274 data = 1 << 18 | system << 12 | command << 6 | xdata;
275
276 /* Modulate the data */
277 ret = ir_raw_gen_manchester(&e, max, &ir_rc5x_timings[0],
278 CHECK_RC5X_NBITS,
279 data >> (RC5X_NBITS-CHECK_RC5X_NBITS));
280 if (ret < 0)
281 return ret;
282 ret = ir_raw_gen_manchester(&e, max - (e - events),
283 &ir_rc5x_timings[1],
284 RC5X_NBITS - CHECK_RC5X_NBITS,
285 data);
286 if (ret < 0)
287 return ret;
288 } else if (protocols & RC_BIT_RC5_SZ &&
289 !ir_rc5_validate_filter(scancode, 0x2fff)) {
290 /* RC5-SZ scancode is raw enough for Manchester as it is */
291 ret = ir_raw_gen_manchester(&e, max, &ir_rc5_sz_timings,
292 RC5_SZ_NBITS, scancode->data & 0x2fff);
293 if (ret < 0)
294 return ret;
295 } else {
296 return -EINVAL;
297 }
298
299 return e - events;
300}
301
302static struct ir_raw_handler rc5_handler = { 187static struct ir_raw_handler rc5_handler = {
303 .protocols = RC_BIT_RC5 | RC_BIT_RC5X | RC_BIT_RC5_SZ, 188 .protocols = RC_BIT_RC5 | RC_BIT_RC5X | RC_BIT_RC5_SZ,
304 .decode = ir_rc5_decode, 189 .decode = ir_rc5_decode,
305 .encode = ir_rc5_encode,
306}; 190};
307 191
308static int __init ir_rc5_decode_init(void) 192static int __init ir_rc5_decode_init(void)
diff --git a/drivers/media/rc/ir-rc6-decoder.c b/drivers/media/rc/ir-rc6-decoder.c
index f9c70baf6e0c..d16bc67af732 100644
--- a/drivers/media/rc/ir-rc6-decoder.c
+++ b/drivers/media/rc/ir-rc6-decoder.c
@@ -291,133 +291,11 @@ out:
291 return -EINVAL; 291 return -EINVAL;
292} 292}
293 293
294static struct ir_raw_timings_manchester ir_rc6_timings[4] = {
295 {
296 .leader = RC6_PREFIX_PULSE,
297 .pulse_space_start = 0,
298 .clock = RC6_UNIT,
299 .invert = 1,
300 .trailer_space = RC6_PREFIX_SPACE,
301 },
302 {
303 .clock = RC6_UNIT,
304 .invert = 1,
305 },
306 {
307 .clock = RC6_UNIT * 2,
308 .invert = 1,
309 },
310 {
311 .clock = RC6_UNIT,
312 .invert = 1,
313 .trailer_space = RC6_SUFFIX_SPACE,
314 },
315};
316
317static int ir_rc6_validate_filter(const struct rc_scancode_filter *scancode,
318 unsigned int important_bits)
319{
320 /* all important bits of scancode should be set in mask */
321 if (~scancode->mask & important_bits)
322 return -EINVAL;
323 /* extra bits in mask should be zero in data */
324 if (scancode->mask & scancode->data & ~important_bits)
325 return -EINVAL;
326 return 0;
327}
328
329/**
330 * ir_rc6_encode() - Encode a scancode as a stream of raw events
331 *
332 * @protocols: allowed protocols
333 * @scancode: scancode filter describing scancode (helps distinguish between
334 * protocol subtypes when scancode is ambiguous)
335 * @events: array of raw ir events to write into
336 * @max: maximum size of @events
337 *
338 * Returns: The number of events written.
339 * -ENOBUFS if there isn't enough space in the array to fit the
340 * encoding. In this case all @max events will have been written.
341 * -EINVAL if the scancode is ambiguous or invalid.
342 */
343static int ir_rc6_encode(u64 protocols,
344 const struct rc_scancode_filter *scancode,
345 struct ir_raw_event *events, unsigned int max)
346{
347 int ret;
348 struct ir_raw_event *e = events;
349
350 if (protocols & RC_BIT_RC6_0 &&
351 !ir_rc6_validate_filter(scancode, 0xffff)) {
352
353 /* Modulate the preamble */
354 ret = ir_raw_gen_manchester(&e, max, &ir_rc6_timings[0], 0, 0);
355 if (ret < 0)
356 return ret;
357
358 /* Modulate the header (Start Bit & Mode-0) */
359 ret = ir_raw_gen_manchester(&e, max - (e - events),
360 &ir_rc6_timings[1],
361 RC6_HEADER_NBITS, (1 << 3));
362 if (ret < 0)
363 return ret;
364
365 /* Modulate Trailer Bit */
366 ret = ir_raw_gen_manchester(&e, max - (e - events),
367 &ir_rc6_timings[2], 1, 0);
368 if (ret < 0)
369 return ret;
370
371 /* Modulate rest of the data */
372 ret = ir_raw_gen_manchester(&e, max - (e - events),
373 &ir_rc6_timings[3], RC6_0_NBITS,
374 scancode->data);
375 if (ret < 0)
376 return ret;
377
378 } else if (protocols & (RC_BIT_RC6_6A_20 | RC_BIT_RC6_6A_24 |
379 RC_BIT_RC6_6A_32 | RC_BIT_RC6_MCE) &&
380 !ir_rc6_validate_filter(scancode, 0x8fffffff)) {
381
382 /* Modulate the preamble */
383 ret = ir_raw_gen_manchester(&e, max, &ir_rc6_timings[0], 0, 0);
384 if (ret < 0)
385 return ret;
386
387 /* Modulate the header (Start Bit & Header-version 6 */
388 ret = ir_raw_gen_manchester(&e, max - (e - events),
389 &ir_rc6_timings[1],
390 RC6_HEADER_NBITS, (1 << 3 | 6));
391 if (ret < 0)
392 return ret;
393
394 /* Modulate Trailer Bit */
395 ret = ir_raw_gen_manchester(&e, max - (e - events),
396 &ir_rc6_timings[2], 1, 0);
397 if (ret < 0)
398 return ret;
399
400 /* Modulate rest of the data */
401 ret = ir_raw_gen_manchester(&e, max - (e - events),
402 &ir_rc6_timings[3],
403 fls(scancode->mask),
404 scancode->data);
405 if (ret < 0)
406 return ret;
407
408 } else {
409 return -EINVAL;
410 }
411
412 return e - events;
413}
414
415static struct ir_raw_handler rc6_handler = { 294static struct ir_raw_handler rc6_handler = {
416 .protocols = RC_BIT_RC6_0 | RC_BIT_RC6_6A_20 | 295 .protocols = RC_BIT_RC6_0 | RC_BIT_RC6_6A_20 |
417 RC_BIT_RC6_6A_24 | RC_BIT_RC6_6A_32 | 296 RC_BIT_RC6_6A_24 | RC_BIT_RC6_6A_32 |
418 RC_BIT_RC6_MCE, 297 RC_BIT_RC6_MCE,
419 .decode = ir_rc6_decode, 298 .decode = ir_rc6_decode,
420 .encode = ir_rc6_encode,
421}; 299};
422 300
423static int __init ir_rc6_decode_init(void) 301static int __init ir_rc6_decode_init(void)
diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c
index baeb5971fd52..85af7a869167 100644
--- a/drivers/media/rc/nuvoton-cir.c
+++ b/drivers/media/rc/nuvoton-cir.c
@@ -526,130 +526,6 @@ static int nvt_set_tx_carrier(struct rc_dev *dev, u32 carrier)
526 return 0; 526 return 0;
527} 527}
528 528
529static int nvt_write_wakeup_codes(struct rc_dev *dev,
530 const u8 *wakeup_sample_buf, int count)
531{
532 int i = 0;
533 u8 reg, reg_learn_mode;
534 unsigned long flags;
535 struct nvt_dev *nvt = dev->priv;
536
537 nvt_dbg_wake("writing wakeup samples");
538
539 reg = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRCON);
540 reg_learn_mode = reg & ~CIR_WAKE_IRCON_MODE0;
541 reg_learn_mode |= CIR_WAKE_IRCON_MODE1;
542
543 /* Lock the learn area to prevent racing with wake-isr */
544 spin_lock_irqsave(&nvt->nvt_lock, flags);
545
546 /* Enable fifo writes */
547 nvt_cir_wake_reg_write(nvt, reg_learn_mode, CIR_WAKE_IRCON);
548
549 /* Clear cir wake rx fifo */
550 nvt_clear_cir_wake_fifo(nvt);
551
552 if (count > WAKE_FIFO_LEN) {
553 nvt_dbg_wake("HW FIFO too small for all wake samples");
554 count = WAKE_FIFO_LEN;
555 }
556
557 if (count)
558 pr_info("Wake samples (%d) =", count);
559 else
560 pr_info("Wake sample fifo cleared");
561
562 /* Write wake samples to fifo */
563 for (i = 0; i < count; i++) {
564 pr_cont(" %02x", wakeup_sample_buf[i]);
565 nvt_cir_wake_reg_write(nvt, wakeup_sample_buf[i],
566 CIR_WAKE_WR_FIFO_DATA);
567 }
568 pr_cont("\n");
569
570 /* Switch cir to wakeup mode and disable fifo writing */
571 nvt_cir_wake_reg_write(nvt, reg, CIR_WAKE_IRCON);
572
573 /* Set number of bytes needed for wake */
574 nvt_cir_wake_reg_write(nvt, count ? count :
575 CIR_WAKE_FIFO_CMP_BYTES,
576 CIR_WAKE_FIFO_CMP_DEEP);
577
578 spin_unlock_irqrestore(&nvt->nvt_lock, flags);
579
580 return 0;
581}
582
583static int nvt_ir_raw_set_wakeup_filter(struct rc_dev *dev,
584 struct rc_scancode_filter *sc_filter)
585{
586 u8 *reg_buf;
587 u8 buf_val;
588 int i, ret, count;
589 unsigned int val;
590 struct ir_raw_event *raw;
591 bool complete;
592
593 /* Require both mask and data to be set before actually committing */
594 if (!sc_filter->mask || !sc_filter->data)
595 return 0;
596
597 raw = kmalloc_array(WAKE_FIFO_LEN, sizeof(*raw), GFP_KERNEL);
598 if (!raw)
599 return -ENOMEM;
600
601 ret = ir_raw_encode_scancode(dev->enabled_wakeup_protocols, sc_filter,
602 raw, WAKE_FIFO_LEN);
603 complete = (ret != -ENOBUFS);
604 if (!complete)
605 ret = WAKE_FIFO_LEN;
606 else if (ret < 0)
607 goto out_raw;
608
609 reg_buf = kmalloc_array(WAKE_FIFO_LEN, sizeof(*reg_buf), GFP_KERNEL);
610 if (!reg_buf) {
611 ret = -ENOMEM;
612 goto out_raw;
613 }
614
615 /* Inspect the ir samples */
616 for (i = 0, count = 0; i < ret && count < WAKE_FIFO_LEN; ++i) {
617 val = NS_TO_US((raw[i]).duration) / SAMPLE_PERIOD;
618
619 /* Split too large values into several smaller ones */
620 while (val > 0 && count < WAKE_FIFO_LEN) {
621
622 /* Skip last value for better comparison tolerance */
623 if (complete && i == ret - 1 && val < BUF_LEN_MASK)
624 break;
625
626 /* Clamp values to BUF_LEN_MASK at most */
627 buf_val = (val > BUF_LEN_MASK) ? BUF_LEN_MASK : val;
628
629 reg_buf[count] = buf_val;
630 val -= buf_val;
631 if ((raw[i]).pulse)
632 reg_buf[count] |= BUF_PULSE_BIT;
633 count++;
634 }
635 }
636
637 ret = nvt_write_wakeup_codes(dev, reg_buf, count);
638
639 kfree(reg_buf);
640out_raw:
641 kfree(raw);
642
643 return ret;
644}
645
646/* Dummy implementation. nuvoton is agnostic to the protocol used */
647static int nvt_ir_raw_change_wakeup_protocol(struct rc_dev *dev,
648 u64 *rc_type)
649{
650 return 0;
651}
652
653/* 529/*
654 * nvt_tx_ir 530 * nvt_tx_ir
655 * 531 *
@@ -1167,14 +1043,11 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
1167 /* Set up the rc device */ 1043 /* Set up the rc device */
1168 rdev->priv = nvt; 1044 rdev->priv = nvt;
1169 rdev->driver_type = RC_DRIVER_IR_RAW; 1045 rdev->driver_type = RC_DRIVER_IR_RAW;
1170 rdev->encode_wakeup = true;
1171 rdev->allowed_protocols = RC_BIT_ALL; 1046 rdev->allowed_protocols = RC_BIT_ALL;
1172 rdev->open = nvt_open; 1047 rdev->open = nvt_open;
1173 rdev->close = nvt_close; 1048 rdev->close = nvt_close;
1174 rdev->tx_ir = nvt_tx_ir; 1049 rdev->tx_ir = nvt_tx_ir;
1175 rdev->s_tx_carrier = nvt_set_tx_carrier; 1050 rdev->s_tx_carrier = nvt_set_tx_carrier;
1176 rdev->s_wakeup_filter = nvt_ir_raw_set_wakeup_filter;
1177 rdev->change_wakeup_protocol = nvt_ir_raw_change_wakeup_protocol;
1178 rdev->input_name = "Nuvoton w836x7hg Infrared Remote Transceiver"; 1051 rdev->input_name = "Nuvoton w836x7hg Infrared Remote Transceiver";
1179 rdev->input_phys = "nuvoton/cir0"; 1052 rdev->input_phys = "nuvoton/cir0";
1180 rdev->input_id.bustype = BUS_HOST; 1053 rdev->input_id.bustype = BUS_HOST;
diff --git a/drivers/media/rc/nuvoton-cir.h b/drivers/media/rc/nuvoton-cir.h
index 9d0e161c2a88..e1cf23c3875b 100644
--- a/drivers/media/rc/nuvoton-cir.h
+++ b/drivers/media/rc/nuvoton-cir.h
@@ -63,7 +63,6 @@ static int debug;
63 */ 63 */
64#define TX_BUF_LEN 256 64#define TX_BUF_LEN 256
65#define RX_BUF_LEN 32 65#define RX_BUF_LEN 32
66#define WAKE_FIFO_LEN 67
67 66
68struct nvt_dev { 67struct nvt_dev {
69 struct pnp_dev *pdev; 68 struct pnp_dev *pdev;
diff --git a/drivers/media/rc/rc-core-priv.h b/drivers/media/rc/rc-core-priv.h
index 4b994aa2f2a7..b68d4f762734 100644
--- a/drivers/media/rc/rc-core-priv.h
+++ b/drivers/media/rc/rc-core-priv.h
@@ -25,8 +25,6 @@ struct ir_raw_handler {
25 25
26 u64 protocols; /* which are handled by this handler */ 26 u64 protocols; /* which are handled by this handler */
27 int (*decode)(struct rc_dev *dev, struct ir_raw_event event); 27 int (*decode)(struct rc_dev *dev, struct ir_raw_event event);
28 int (*encode)(u64 protocols, const struct rc_scancode_filter *scancode,
29 struct ir_raw_event *events, unsigned int max);
30 28
31 /* These two should only be used by the lirc decoder */ 29 /* These two should only be used by the lirc decoder */
32 int (*raw_register)(struct rc_dev *dev); 30 int (*raw_register)(struct rc_dev *dev);
@@ -152,44 +150,10 @@ static inline bool is_timing_event(struct ir_raw_event ev)
152#define TO_US(duration) DIV_ROUND_CLOSEST((duration), 1000) 150#define TO_US(duration) DIV_ROUND_CLOSEST((duration), 1000)
153#define TO_STR(is_pulse) ((is_pulse) ? "pulse" : "space") 151#define TO_STR(is_pulse) ((is_pulse) ? "pulse" : "space")
154 152
155/* functions for IR encoders */
156
157static inline void init_ir_raw_event_duration(struct ir_raw_event *ev,
158 unsigned int pulse,
159 u32 duration)
160{
161 init_ir_raw_event(ev);
162 ev->duration = duration;
163 ev->pulse = pulse;
164}
165
166/**
167 * struct ir_raw_timings_manchester - Manchester coding timings
168 * @leader: duration of leader pulse (if any) 0 if continuing
169 * existing signal (see @pulse_space_start)
170 * @pulse_space_start: 1 for starting with pulse (0 for starting with space)
171 * @clock: duration of each pulse/space in ns
172 * @invert: if set clock logic is inverted
173 * (0 = space + pulse, 1 = pulse + space)
174 * @trailer_space: duration of trailer space in ns
175 */
176struct ir_raw_timings_manchester {
177 unsigned int leader;
178 unsigned int pulse_space_start:1;
179 unsigned int clock;
180 unsigned int invert:1;
181 unsigned int trailer_space;
182};
183
184int ir_raw_gen_manchester(struct ir_raw_event **ev, unsigned int max,
185 const struct ir_raw_timings_manchester *timings,
186 unsigned int n, unsigned int data);
187
188/* 153/*
189 * Routines from rc-raw.c to be used internally and by decoders 154 * Routines from rc-raw.c to be used internally and by decoders
190 */ 155 */
191u64 ir_raw_get_allowed_protocols(void); 156u64 ir_raw_get_allowed_protocols(void);
192u64 ir_raw_get_encode_protocols(void);
193int ir_raw_event_register(struct rc_dev *dev); 157int ir_raw_event_register(struct rc_dev *dev);
194void ir_raw_event_unregister(struct rc_dev *dev); 158void ir_raw_event_unregister(struct rc_dev *dev);
195int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler); 159int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler);
diff --git a/drivers/media/rc/rc-ir-raw.c b/drivers/media/rc/rc-ir-raw.c
index b9e4645c731c..b732ac6a26d8 100644
--- a/drivers/media/rc/rc-ir-raw.c
+++ b/drivers/media/rc/rc-ir-raw.c
@@ -30,7 +30,6 @@ static LIST_HEAD(ir_raw_client_list);
30static DEFINE_MUTEX(ir_raw_handler_lock); 30static DEFINE_MUTEX(ir_raw_handler_lock);
31static LIST_HEAD(ir_raw_handler_list); 31static LIST_HEAD(ir_raw_handler_list);
32static u64 available_protocols; 32static u64 available_protocols;
33static u64 encode_protocols;
34 33
35static int ir_raw_event_thread(void *data) 34static int ir_raw_event_thread(void *data)
36{ 35{
@@ -241,146 +240,12 @@ ir_raw_get_allowed_protocols(void)
241 return protocols; 240 return protocols;
242} 241}
243 242
244/* used internally by the sysfs interface */
245u64
246ir_raw_get_encode_protocols(void)
247{
248 u64 protocols;
249
250 mutex_lock(&ir_raw_handler_lock);
251 protocols = encode_protocols;
252 mutex_unlock(&ir_raw_handler_lock);
253 return protocols;
254}
255
256static int change_protocol(struct rc_dev *dev, u64 *rc_type) 243static int change_protocol(struct rc_dev *dev, u64 *rc_type)
257{ 244{
258 /* the caller will update dev->enabled_protocols */ 245 /* the caller will update dev->enabled_protocols */
259 return 0; 246 return 0;
260} 247}
261 248
262/**
263 * ir_raw_gen_manchester() - Encode data with Manchester (bi-phase) modulation.
264 * @ev: Pointer to pointer to next free event. *@ev is incremented for
265 * each raw event filled.
266 * @max: Maximum number of raw events to fill.
267 * @timings: Manchester modulation timings.
268 * @n: Number of bits of data.
269 * @data: Data bits to encode.
270 *
271 * Encodes the @n least significant bits of @data using Manchester (bi-phase)
272 * modulation with the timing characteristics described by @timings, writing up
273 * to @max raw IR events using the *@ev pointer.
274 *
275 * Returns: 0 on success.
276 * -ENOBUFS if there isn't enough space in the array to fit the
277 * full encoded data. In this case all @max events will have been
278 * written.
279 */
280int ir_raw_gen_manchester(struct ir_raw_event **ev, unsigned int max,
281 const struct ir_raw_timings_manchester *timings,
282 unsigned int n, unsigned int data)
283{
284 bool need_pulse;
285 unsigned int i;
286 int ret = -ENOBUFS;
287
288 i = 1 << (n - 1);
289
290 if (timings->leader) {
291 if (!max--)
292 return ret;
293 if (timings->pulse_space_start) {
294 init_ir_raw_event_duration((*ev)++, 1, timings->leader);
295
296 if (!max--)
297 return ret;
298 init_ir_raw_event_duration((*ev), 0, timings->leader);
299 } else {
300 init_ir_raw_event_duration((*ev), 1, timings->leader);
301 }
302 i >>= 1;
303 } else {
304 /* continue existing signal */
305 --(*ev);
306 }
307 /* from here on *ev will point to the last event rather than the next */
308
309 while (n && i > 0) {
310 need_pulse = !(data & i);
311 if (timings->invert)
312 need_pulse = !need_pulse;
313 if (need_pulse == !!(*ev)->pulse) {
314 (*ev)->duration += timings->clock;
315 } else {
316 if (!max--)
317 goto nobufs;
318 init_ir_raw_event_duration(++(*ev), need_pulse,
319 timings->clock);
320 }
321
322 if (!max--)
323 goto nobufs;
324 init_ir_raw_event_duration(++(*ev), !need_pulse,
325 timings->clock);
326 i >>= 1;
327 }
328
329 if (timings->trailer_space) {
330 if (!(*ev)->pulse)
331 (*ev)->duration += timings->trailer_space;
332 else if (!max--)
333 goto nobufs;
334 else
335 init_ir_raw_event_duration(++(*ev), 0,
336 timings->trailer_space);
337 }
338
339 ret = 0;
340nobufs:
341 /* point to the next event rather than last event before returning */
342 ++(*ev);
343 return ret;
344}
345EXPORT_SYMBOL(ir_raw_gen_manchester);
346
347/**
348 * ir_raw_encode_scancode() - Encode a scancode as raw events
349 *
350 * @protocols: permitted protocols
351 * @scancode: scancode filter describing a single scancode
352 * @events: array of raw events to write into
353 * @max: max number of raw events
354 *
355 * Attempts to encode the scancode as raw events.
356 *
357 * Returns: The number of events written.
358 * -ENOBUFS if there isn't enough space in the array to fit the
359 * encoding. In this case all @max events will have been written.
360 * -EINVAL if the scancode is ambiguous or invalid, or if no
361 * compatible encoder was found.
362 */
363int ir_raw_encode_scancode(u64 protocols,
364 const struct rc_scancode_filter *scancode,
365 struct ir_raw_event *events, unsigned int max)
366{
367 struct ir_raw_handler *handler;
368 int ret = -EINVAL;
369
370 mutex_lock(&ir_raw_handler_lock);
371 list_for_each_entry(handler, &ir_raw_handler_list, list) {
372 if (handler->protocols & protocols && handler->encode) {
373 ret = handler->encode(protocols, scancode, events, max);
374 if (ret >= 0 || ret == -ENOBUFS)
375 break;
376 }
377 }
378 mutex_unlock(&ir_raw_handler_lock);
379
380 return ret;
381}
382EXPORT_SYMBOL(ir_raw_encode_scancode);
383
384/* 249/*
385 * Used to (un)register raw event clients 250 * Used to (un)register raw event clients
386 */ 251 */
@@ -463,8 +328,6 @@ int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler)
463 list_for_each_entry(raw, &ir_raw_client_list, list) 328 list_for_each_entry(raw, &ir_raw_client_list, list)
464 ir_raw_handler->raw_register(raw->dev); 329 ir_raw_handler->raw_register(raw->dev);
465 available_protocols |= ir_raw_handler->protocols; 330 available_protocols |= ir_raw_handler->protocols;
466 if (ir_raw_handler->encode)
467 encode_protocols |= ir_raw_handler->protocols;
468 mutex_unlock(&ir_raw_handler_lock); 331 mutex_unlock(&ir_raw_handler_lock);
469 332
470 return 0; 333 return 0;
@@ -481,8 +344,6 @@ void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler)
481 list_for_each_entry(raw, &ir_raw_client_list, list) 344 list_for_each_entry(raw, &ir_raw_client_list, list)
482 ir_raw_handler->raw_unregister(raw->dev); 345 ir_raw_handler->raw_unregister(raw->dev);
483 available_protocols &= ~ir_raw_handler->protocols; 346 available_protocols &= ~ir_raw_handler->protocols;
484 if (ir_raw_handler->encode)
485 encode_protocols &= ~ir_raw_handler->protocols;
486 mutex_unlock(&ir_raw_handler_lock); 347 mutex_unlock(&ir_raw_handler_lock);
487} 348}
488EXPORT_SYMBOL(ir_raw_handler_unregister); 349EXPORT_SYMBOL(ir_raw_handler_unregister);
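Note: the rc-ir-raw.c hunk above backs out the in-kernel IR encode support (ir_raw_gen_manchester(), the encode_protocols mask and ir_raw_encode_scancode()). As a reminder of what the dropped Manchester generator did, here is a minimal stand-alone C sketch of bi-phase expansion; unlike the removed kernel code it does not merge adjacent half-bits of the same polarity into a single event, and the pulse/space printout stands in for struct ir_raw_event. The 889 us half-bit matches RC-5 timing.

#include <stdbool.h>
#include <stdio.h>

/* Expand the nbits least significant bits of data, MSB first. A set bit
 * becomes space-then-pulse, a clear bit pulse-then-space, mirroring the
 * removed encoder's need_pulse = !(data & mask) convention; invert flips
 * both halves. */
static void manchester_encode(unsigned int data, unsigned int nbits,
			      unsigned int clock_ns, bool invert)
{
	for (unsigned int mask = 1u << (nbits - 1); mask; mask >>= 1) {
		bool pulse_first = !(data & mask);

		if (invert)
			pulse_first = !pulse_first;

		printf("%s %u ns, %s %u ns\n",
		       pulse_first ? "pulse" : "space", clock_ns,
		       pulse_first ? "space" : "pulse", clock_ns);
	}
}

int main(void)
{
	manchester_encode(0x14, 5, 889000, false);	/* 889 us half-bit, RC-5 style */
	return 0;
}
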
diff --git a/drivers/media/rc/rc-loopback.c b/drivers/media/rc/rc-loopback.c
index d8bdf63ce985..63dace8198b0 100644
--- a/drivers/media/rc/rc-loopback.c
+++ b/drivers/media/rc/rc-loopback.c
@@ -26,7 +26,6 @@
26#include <linux/device.h> 26#include <linux/device.h>
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/sched.h> 28#include <linux/sched.h>
29#include <linux/slab.h>
30#include <media/rc-core.h> 29#include <media/rc-core.h>
31 30
32#define DRIVER_NAME "rc-loopback" 31#define DRIVER_NAME "rc-loopback"
@@ -177,39 +176,6 @@ static int loop_set_carrier_report(struct rc_dev *dev, int enable)
177 return 0; 176 return 0;
178} 177}
179 178
180static int loop_set_wakeup_filter(struct rc_dev *dev,
181 struct rc_scancode_filter *sc_filter)
182{
183 static const unsigned int max = 512;
184 struct ir_raw_event *raw;
185 int ret;
186 int i;
187
188 /* fine to disable filter */
189 if (!sc_filter->mask)
190 return 0;
191
192 /* encode the specified filter and loop it back */
193 raw = kmalloc_array(max, sizeof(*raw), GFP_KERNEL);
194 ret = ir_raw_encode_scancode(dev->enabled_wakeup_protocols, sc_filter,
195 raw, max);
196 /* still loop back the partial raw IR even if it's incomplete */
197 if (ret == -ENOBUFS)
198 ret = max;
199 if (ret >= 0) {
200 /* do the loopback */
201 for (i = 0; i < ret; ++i)
202 ir_raw_event_store(dev, &raw[i]);
203 ir_raw_event_handle(dev);
204
205 ret = 0;
206 }
207
208 kfree(raw);
209
210 return ret;
211}
212
213static int __init loop_init(void) 179static int __init loop_init(void)
214{ 180{
215 struct rc_dev *rc; 181 struct rc_dev *rc;
@@ -229,7 +195,6 @@ static int __init loop_init(void)
229 rc->map_name = RC_MAP_EMPTY; 195 rc->map_name = RC_MAP_EMPTY;
230 rc->priv = &loopdev; 196 rc->priv = &loopdev;
231 rc->driver_type = RC_DRIVER_IR_RAW; 197 rc->driver_type = RC_DRIVER_IR_RAW;
232 rc->encode_wakeup = true;
233 rc->allowed_protocols = RC_BIT_ALL; 198 rc->allowed_protocols = RC_BIT_ALL;
234 rc->timeout = 100 * 1000 * 1000; /* 100 ms */ 199 rc->timeout = 100 * 1000 * 1000; /* 100 ms */
235 rc->min_timeout = 1; 200 rc->min_timeout = 1;
@@ -244,7 +209,6 @@ static int __init loop_init(void)
244 rc->s_idle = loop_set_idle; 209 rc->s_idle = loop_set_idle;
245 rc->s_learning_mode = loop_set_learning_mode; 210 rc->s_learning_mode = loop_set_learning_mode;
246 rc->s_carrier_report = loop_set_carrier_report; 211 rc->s_carrier_report = loop_set_carrier_report;
247 rc->s_wakeup_filter = loop_set_wakeup_filter;
248 212
249 loopdev.txmask = RXMASK_REGULAR; 213 loopdev.txmask = RXMASK_REGULAR;
250 loopdev.txcarrier = 36000; 214 loopdev.txcarrier = 36000;
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 9d015db65280..0ff388a16168 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -865,8 +865,6 @@ static ssize_t show_protocols(struct device *device,
865 } else { 865 } else {
866 enabled = dev->enabled_wakeup_protocols; 866 enabled = dev->enabled_wakeup_protocols;
867 allowed = dev->allowed_wakeup_protocols; 867 allowed = dev->allowed_wakeup_protocols;
868 if (dev->encode_wakeup && !allowed)
869 allowed = ir_raw_get_encode_protocols();
870 } 868 }
871 869
872 mutex_unlock(&dev->lock); 870 mutex_unlock(&dev->lock);
@@ -1408,16 +1406,13 @@ int rc_register_device(struct rc_dev *dev)
1408 path ? path : "N/A"); 1406 path ? path : "N/A");
1409 kfree(path); 1407 kfree(path);
1410 1408
1411 if (dev->driver_type == RC_DRIVER_IR_RAW || dev->encode_wakeup) { 1409 if (dev->driver_type == RC_DRIVER_IR_RAW) {
1412 /* Load raw decoders, if they aren't already */ 1410 /* Load raw decoders, if they aren't already */
1413 if (!raw_init) { 1411 if (!raw_init) {
1414 IR_dprintk(1, "Loading raw decoders\n"); 1412 IR_dprintk(1, "Loading raw decoders\n");
1415 ir_raw_init(); 1413 ir_raw_init();
1416 raw_init = true; 1414 raw_init = true;
1417 } 1415 }
1418 }
1419
1420 if (dev->driver_type == RC_DRIVER_IR_RAW) {
1421 /* calls ir_register_device so unlock mutex here*/ 1416 /* calls ir_register_device so unlock mutex here*/
1422 mutex_unlock(&dev->lock); 1417 mutex_unlock(&dev->lock);
1423 rc = ir_raw_event_register(dev); 1418 rc = ir_raw_event_register(dev);
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 93b315459098..a14c428f70e9 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -715,6 +715,7 @@ static void __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
715 break; 715 break;
716 case VB2_BUF_STATE_PREPARING: 716 case VB2_BUF_STATE_PREPARING:
717 case VB2_BUF_STATE_DEQUEUED: 717 case VB2_BUF_STATE_DEQUEUED:
718 case VB2_BUF_STATE_REQUEUEING:
718 /* nothing */ 719 /* nothing */
719 break; 720 break;
720 } 721 }
@@ -1182,7 +1183,8 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
1182 1183
1183 if (WARN_ON(state != VB2_BUF_STATE_DONE && 1184 if (WARN_ON(state != VB2_BUF_STATE_DONE &&
1184 state != VB2_BUF_STATE_ERROR && 1185 state != VB2_BUF_STATE_ERROR &&
1185 state != VB2_BUF_STATE_QUEUED)) 1186 state != VB2_BUF_STATE_QUEUED &&
1187 state != VB2_BUF_STATE_REQUEUEING))
1186 state = VB2_BUF_STATE_ERROR; 1188 state = VB2_BUF_STATE_ERROR;
1187 1189
1188#ifdef CONFIG_VIDEO_ADV_DEBUG 1190#ifdef CONFIG_VIDEO_ADV_DEBUG
@@ -1199,22 +1201,30 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
1199 for (plane = 0; plane < vb->num_planes; ++plane) 1201 for (plane = 0; plane < vb->num_planes; ++plane)
1200 call_void_memop(vb, finish, vb->planes[plane].mem_priv); 1202 call_void_memop(vb, finish, vb->planes[plane].mem_priv);
1201 1203
1202 /* Add the buffer to the done buffers list */
1203 spin_lock_irqsave(&q->done_lock, flags); 1204 spin_lock_irqsave(&q->done_lock, flags);
1204 vb->state = state; 1205 if (state == VB2_BUF_STATE_QUEUED ||
1205 if (state != VB2_BUF_STATE_QUEUED) 1206 state == VB2_BUF_STATE_REQUEUEING) {
1207 vb->state = VB2_BUF_STATE_QUEUED;
1208 } else {
1209 /* Add the buffer to the done buffers list */
1206 list_add_tail(&vb->done_entry, &q->done_list); 1210 list_add_tail(&vb->done_entry, &q->done_list);
1211 vb->state = state;
1212 }
1207 atomic_dec(&q->owned_by_drv_count); 1213 atomic_dec(&q->owned_by_drv_count);
1208 spin_unlock_irqrestore(&q->done_lock, flags); 1214 spin_unlock_irqrestore(&q->done_lock, flags);
1209 1215
1210 if (state == VB2_BUF_STATE_QUEUED) { 1216 switch (state) {
1217 case VB2_BUF_STATE_QUEUED:
1218 return;
1219 case VB2_BUF_STATE_REQUEUEING:
1211 if (q->start_streaming_called) 1220 if (q->start_streaming_called)
1212 __enqueue_in_driver(vb); 1221 __enqueue_in_driver(vb);
1213 return; 1222 return;
1223 default:
1224 /* Inform any processes that may be waiting for buffers */
1225 wake_up(&q->done_wq);
1226 break;
1214 } 1227 }
1215
1216 /* Inform any processes that may be waiting for buffers */
1217 wake_up(&q->done_wq);
1218} 1228}
1219EXPORT_SYMBOL_GPL(vb2_buffer_done); 1229EXPORT_SYMBOL_GPL(vb2_buffer_done);
1220 1230
@@ -1244,19 +1254,19 @@ EXPORT_SYMBOL_GPL(vb2_discard_done);
1244 1254
1245static void vb2_warn_zero_bytesused(struct vb2_buffer *vb) 1255static void vb2_warn_zero_bytesused(struct vb2_buffer *vb)
1246{ 1256{
1247 static bool __check_once __read_mostly; 1257 static bool check_once;
1248 1258
1249 if (__check_once) 1259 if (check_once)
1250 return; 1260 return;
1251 1261
1252 __check_once = true; 1262 check_once = true;
1253 __WARN(); 1263 WARN_ON(1);
1254 1264
1255 pr_warn_once("use of bytesused == 0 is deprecated and will be removed in the future,\n"); 1265 pr_warn("use of bytesused == 0 is deprecated and will be removed in the future,\n");
1256 if (vb->vb2_queue->allow_zero_bytesused) 1266 if (vb->vb2_queue->allow_zero_bytesused)
1257 pr_warn_once("use VIDIOC_DECODER_CMD(V4L2_DEC_CMD_STOP) instead.\n"); 1267 pr_warn("use VIDIOC_DECODER_CMD(V4L2_DEC_CMD_STOP) instead.\n");
1258 else 1268 else
1259 pr_warn_once("use the actual size instead.\n"); 1269 pr_warn("use the actual size instead.\n");
1260} 1270}
1261 1271
1262/** 1272/**
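Note: the videobuf2 changes above introduce VB2_BUF_STATE_REQUEUEING, letting a driver complete a buffer in a way that hands it straight back to the driver instead of to userspace. Below is a stand-alone model of the reworked tail of vb2_buffer_done(); printf stands in for the done-list and wait-queue operations, and locking is omitted.

#include <stdbool.h>
#include <stdio.h>

enum buf_state { BUF_DONE, BUF_ERROR, BUF_QUEUED, BUF_REQUEUEING };

/* QUEUED and REQUEUEING buffers stay owned by the queue (their state is
 * forced back to QUEUED and they never reach the done list); REQUEUEING
 * additionally goes straight back to the driver once streaming has
 * started. Everything else completes and wakes waiters. */
static void buffer_done(enum buf_state state, bool start_streaming_called)
{
	if (state == BUF_QUEUED || state == BUF_REQUEUEING)
		printf("state -> QUEUED, kept off the done list\n");
	else
		printf("state -> %s, added to done list\n",
		       state == BUF_ERROR ? "ERROR" : "DONE");

	switch (state) {
	case BUF_QUEUED:
		return;
	case BUF_REQUEUEING:
		if (start_streaming_called)
			printf("buffer re-enqueued in the driver\n");
		return;
	default:
		printf("wake up processes waiting for buffers\n");
		break;
	}
}

int main(void)
{
	buffer_done(BUF_REQUEUEING, true);
	buffer_done(BUF_DONE, true);
	return 0;
}
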
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index 3a27a84ad3ec..9426276dbe14 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -2245,6 +2245,9 @@ void omap3_gpmc_save_context(void)
2245{ 2245{
2246 int i; 2246 int i;
2247 2247
2248 if (!gpmc_base)
2249 return;
2250
2248 gpmc_context.sysconfig = gpmc_read_reg(GPMC_SYSCONFIG); 2251 gpmc_context.sysconfig = gpmc_read_reg(GPMC_SYSCONFIG);
2249 gpmc_context.irqenable = gpmc_read_reg(GPMC_IRQENABLE); 2252 gpmc_context.irqenable = gpmc_read_reg(GPMC_IRQENABLE);
2250 gpmc_context.timeout_ctrl = gpmc_read_reg(GPMC_TIMEOUT_CONTROL); 2253 gpmc_context.timeout_ctrl = gpmc_read_reg(GPMC_TIMEOUT_CONTROL);
@@ -2277,6 +2280,9 @@ void omap3_gpmc_restore_context(void)
2277{ 2280{
2278 int i; 2281 int i;
2279 2282
2283 if (!gpmc_base)
2284 return;
2285
2280 gpmc_write_reg(GPMC_SYSCONFIG, gpmc_context.sysconfig); 2286 gpmc_write_reg(GPMC_SYSCONFIG, gpmc_context.sysconfig);
2281 gpmc_write_reg(GPMC_IRQENABLE, gpmc_context.irqenable); 2287 gpmc_write_reg(GPMC_IRQENABLE, gpmc_context.irqenable);
2282 gpmc_write_reg(GPMC_TIMEOUT_CONTROL, gpmc_context.timeout_ctrl); 2288 gpmc_write_reg(GPMC_TIMEOUT_CONTROL, gpmc_context.timeout_ctrl);
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 653815950aa2..3f68dd251ce8 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -115,7 +115,7 @@ config MFD_CROS_EC_I2C
115 115
116config MFD_CROS_EC_SPI 116config MFD_CROS_EC_SPI
117 tristate "ChromeOS Embedded Controller (SPI)" 117 tristate "ChromeOS Embedded Controller (SPI)"
118 depends on MFD_CROS_EC && CROS_EC_PROTO && SPI && OF 118 depends on MFD_CROS_EC && CROS_EC_PROTO && SPI
119 119
120 ---help--- 120 ---help---
121 If you say Y here, you get support for talking to the ChromeOS EC 121 If you say Y here, you get support for talking to the ChromeOS EC
diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
index bebf58a06a6b..a72ddb295078 100644
--- a/drivers/mfd/arizona-core.c
+++ b/drivers/mfd/arizona-core.c
@@ -651,7 +651,7 @@ static int arizona_runtime_suspend(struct device *dev)
651 651
652 arizona->has_fully_powered_off = true; 652 arizona->has_fully_powered_off = true;
653 653
654 disable_irq(arizona->irq); 654 disable_irq_nosync(arizona->irq);
655 arizona_enable_reset(arizona); 655 arizona_enable_reset(arizona);
656 regulator_bulk_disable(arizona->num_core_supplies, 656 regulator_bulk_disable(arizona->num_core_supplies,
657 arizona->core_supplies); 657 arizona->core_supplies);
@@ -1141,10 +1141,6 @@ int arizona_dev_init(struct arizona *arizona)
1141 arizona->pdata.gpio_defaults[i]); 1141 arizona->pdata.gpio_defaults[i]);
1142 } 1142 }
1143 1143
1144 pm_runtime_set_autosuspend_delay(arizona->dev, 100);
1145 pm_runtime_use_autosuspend(arizona->dev);
1146 pm_runtime_enable(arizona->dev);
1147
1148 /* Chip default */ 1144 /* Chip default */
1149 if (!arizona->pdata.clk32k_src) 1145 if (!arizona->pdata.clk32k_src)
1150 arizona->pdata.clk32k_src = ARIZONA_32KZ_MCLK2; 1146 arizona->pdata.clk32k_src = ARIZONA_32KZ_MCLK2;
@@ -1245,11 +1241,17 @@ int arizona_dev_init(struct arizona *arizona)
1245 arizona->pdata.spk_fmt[i]); 1241 arizona->pdata.spk_fmt[i]);
1246 } 1242 }
1247 1243
1244 pm_runtime_set_active(arizona->dev);
1245 pm_runtime_enable(arizona->dev);
1246
1248 /* Set up for interrupts */ 1247 /* Set up for interrupts */
1249 ret = arizona_irq_init(arizona); 1248 ret = arizona_irq_init(arizona);
1250 if (ret != 0) 1249 if (ret != 0)
1251 goto err_reset; 1250 goto err_reset;
1252 1251
1252 pm_runtime_set_autosuspend_delay(arizona->dev, 100);
1253 pm_runtime_use_autosuspend(arizona->dev);
1254
1253 arizona_request_irq(arizona, ARIZONA_IRQ_CLKGEN_ERR, "CLKGEN error", 1255 arizona_request_irq(arizona, ARIZONA_IRQ_CLKGEN_ERR, "CLKGEN error",
1254 arizona_clkgen_err, arizona); 1256 arizona_clkgen_err, arizona);
1255 arizona_request_irq(arizona, ARIZONA_IRQ_OVERCLOCKED, "Overclocked", 1257 arizona_request_irq(arizona, ARIZONA_IRQ_OVERCLOCKED, "Overclocked",
@@ -1278,10 +1280,6 @@ int arizona_dev_init(struct arizona *arizona)
1278 goto err_irq; 1280 goto err_irq;
1279 } 1281 }
1280 1282
1281#ifdef CONFIG_PM
1282 regulator_disable(arizona->dcvdd);
1283#endif
1284
1285 return 0; 1283 return 0;
1286 1284
1287err_irq: 1285err_irq:
diff --git a/drivers/mfd/max77693.c b/drivers/mfd/max77693.c
index cb14afa97e6f..67bc53fdc389 100644
--- a/drivers/mfd/max77693.c
+++ b/drivers/mfd/max77693.c
@@ -33,6 +33,7 @@
33#include <linux/mutex.h> 33#include <linux/mutex.h>
34#include <linux/mfd/core.h> 34#include <linux/mfd/core.h>
35#include <linux/mfd/max77693.h> 35#include <linux/mfd/max77693.h>
36#include <linux/mfd/max77693-common.h>
36#include <linux/mfd/max77693-private.h> 37#include <linux/mfd/max77693-private.h>
37#include <linux/regulator/machine.h> 38#include <linux/regulator/machine.h>
38#include <linux/regmap.h> 39#include <linux/regmap.h>
@@ -193,22 +194,22 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
193 } else 194 } else
194 dev_info(max77693->dev, "device ID: 0x%x\n", reg_data); 195 dev_info(max77693->dev, "device ID: 0x%x\n", reg_data);
195 196
196 max77693->muic = i2c_new_dummy(i2c->adapter, I2C_ADDR_MUIC); 197 max77693->i2c_muic = i2c_new_dummy(i2c->adapter, I2C_ADDR_MUIC);
197 if (!max77693->muic) { 198 if (!max77693->i2c_muic) {
198 dev_err(max77693->dev, "Failed to allocate I2C device for MUIC\n"); 199 dev_err(max77693->dev, "Failed to allocate I2C device for MUIC\n");
199 return -ENODEV; 200 return -ENODEV;
200 } 201 }
201 i2c_set_clientdata(max77693->muic, max77693); 202 i2c_set_clientdata(max77693->i2c_muic, max77693);
202 203
203 max77693->haptic = i2c_new_dummy(i2c->adapter, I2C_ADDR_HAPTIC); 204 max77693->i2c_haptic = i2c_new_dummy(i2c->adapter, I2C_ADDR_HAPTIC);
204 if (!max77693->haptic) { 205 if (!max77693->i2c_haptic) {
205 dev_err(max77693->dev, "Failed to allocate I2C device for Haptic\n"); 206 dev_err(max77693->dev, "Failed to allocate I2C device for Haptic\n");
206 ret = -ENODEV; 207 ret = -ENODEV;
207 goto err_i2c_haptic; 208 goto err_i2c_haptic;
208 } 209 }
209 i2c_set_clientdata(max77693->haptic, max77693); 210 i2c_set_clientdata(max77693->i2c_haptic, max77693);
210 211
211 max77693->regmap_haptic = devm_regmap_init_i2c(max77693->haptic, 212 max77693->regmap_haptic = devm_regmap_init_i2c(max77693->i2c_haptic,
212 &max77693_regmap_haptic_config); 213 &max77693_regmap_haptic_config);
213 if (IS_ERR(max77693->regmap_haptic)) { 214 if (IS_ERR(max77693->regmap_haptic)) {
214 ret = PTR_ERR(max77693->regmap_haptic); 215 ret = PTR_ERR(max77693->regmap_haptic);
@@ -222,7 +223,7 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
222 * instance of MUIC device when irq of max77693 is initialized 223 * instance of MUIC device when irq of max77693 is initialized
223 * before call max77693-muic probe() function. 224 * before call max77693-muic probe() function.
224 */ 225 */
225 max77693->regmap_muic = devm_regmap_init_i2c(max77693->muic, 226 max77693->regmap_muic = devm_regmap_init_i2c(max77693->i2c_muic,
226 &max77693_regmap_muic_config); 227 &max77693_regmap_muic_config);
227 if (IS_ERR(max77693->regmap_muic)) { 228 if (IS_ERR(max77693->regmap_muic)) {
228 ret = PTR_ERR(max77693->regmap_muic); 229 ret = PTR_ERR(max77693->regmap_muic);
@@ -255,7 +256,7 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
255 IRQF_ONESHOT | IRQF_SHARED | 256 IRQF_ONESHOT | IRQF_SHARED |
256 IRQF_TRIGGER_FALLING, 0, 257 IRQF_TRIGGER_FALLING, 0,
257 &max77693_charger_irq_chip, 258 &max77693_charger_irq_chip,
258 &max77693->irq_data_charger); 259 &max77693->irq_data_chg);
259 if (ret) { 260 if (ret) {
260 dev_err(max77693->dev, "failed to add irq chip: %d\n", ret); 261 dev_err(max77693->dev, "failed to add irq chip: %d\n", ret);
261 goto err_irq_charger; 262 goto err_irq_charger;
@@ -296,15 +297,15 @@ err_mfd:
296err_intsrc: 297err_intsrc:
297 regmap_del_irq_chip(max77693->irq, max77693->irq_data_muic); 298 regmap_del_irq_chip(max77693->irq, max77693->irq_data_muic);
298err_irq_muic: 299err_irq_muic:
299 regmap_del_irq_chip(max77693->irq, max77693->irq_data_charger); 300 regmap_del_irq_chip(max77693->irq, max77693->irq_data_chg);
300err_irq_charger: 301err_irq_charger:
301 regmap_del_irq_chip(max77693->irq, max77693->irq_data_topsys); 302 regmap_del_irq_chip(max77693->irq, max77693->irq_data_topsys);
302err_irq_topsys: 303err_irq_topsys:
303 regmap_del_irq_chip(max77693->irq, max77693->irq_data_led); 304 regmap_del_irq_chip(max77693->irq, max77693->irq_data_led);
304err_regmap: 305err_regmap:
305 i2c_unregister_device(max77693->haptic); 306 i2c_unregister_device(max77693->i2c_haptic);
306err_i2c_haptic: 307err_i2c_haptic:
307 i2c_unregister_device(max77693->muic); 308 i2c_unregister_device(max77693->i2c_muic);
308 return ret; 309 return ret;
309} 310}
310 311
@@ -315,12 +316,12 @@ static int max77693_i2c_remove(struct i2c_client *i2c)
315 mfd_remove_devices(max77693->dev); 316 mfd_remove_devices(max77693->dev);
316 317
317 regmap_del_irq_chip(max77693->irq, max77693->irq_data_muic); 318 regmap_del_irq_chip(max77693->irq, max77693->irq_data_muic);
318 regmap_del_irq_chip(max77693->irq, max77693->irq_data_charger); 319 regmap_del_irq_chip(max77693->irq, max77693->irq_data_chg);
319 regmap_del_irq_chip(max77693->irq, max77693->irq_data_topsys); 320 regmap_del_irq_chip(max77693->irq, max77693->irq_data_topsys);
320 regmap_del_irq_chip(max77693->irq, max77693->irq_data_led); 321 regmap_del_irq_chip(max77693->irq, max77693->irq_data_led);
321 322
322 i2c_unregister_device(max77693->muic); 323 i2c_unregister_device(max77693->i2c_muic);
323 i2c_unregister_device(max77693->haptic); 324 i2c_unregister_device(max77693->i2c_haptic);
324 325
325 return 0; 326 return 0;
326} 327}
diff --git a/drivers/mfd/max77843.c b/drivers/mfd/max77843.c
index a354ac677ec7..c52162ea3d0a 100644
--- a/drivers/mfd/max77843.c
+++ b/drivers/mfd/max77843.c
@@ -17,6 +17,7 @@
17#include <linux/interrupt.h> 17#include <linux/interrupt.h>
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/mfd/core.h> 19#include <linux/mfd/core.h>
20#include <linux/mfd/max77693-common.h>
20#include <linux/mfd/max77843-private.h> 21#include <linux/mfd/max77843-private.h>
21#include <linux/of_device.h> 22#include <linux/of_device.h>
22#include <linux/platform_device.h> 23#include <linux/platform_device.h>
@@ -71,7 +72,7 @@ static const struct regmap_irq_chip max77843_irq_chip = {
71}; 72};
72 73
73/* Charger and Charger regulator use same regmap. */ 74/* Charger and Charger regulator use same regmap. */
74static int max77843_chg_init(struct max77843 *max77843) 75static int max77843_chg_init(struct max77693_dev *max77843)
75{ 76{
76 int ret; 77 int ret;
77 78
@@ -101,7 +102,7 @@ err_chg_i2c:
101static int max77843_probe(struct i2c_client *i2c, 102static int max77843_probe(struct i2c_client *i2c,
102 const struct i2c_device_id *id) 103 const struct i2c_device_id *id)
103{ 104{
104 struct max77843 *max77843; 105 struct max77693_dev *max77843;
105 unsigned int reg_data; 106 unsigned int reg_data;
106 int ret; 107 int ret;
107 108
@@ -113,6 +114,7 @@ static int max77843_probe(struct i2c_client *i2c,
113 max77843->dev = &i2c->dev; 114 max77843->dev = &i2c->dev;
114 max77843->i2c = i2c; 115 max77843->i2c = i2c;
115 max77843->irq = i2c->irq; 116 max77843->irq = i2c->irq;
117 max77843->type = id->driver_data;
116 118
117 max77843->regmap = devm_regmap_init_i2c(i2c, 119 max77843->regmap = devm_regmap_init_i2c(i2c,
118 &max77843_regmap_config); 120 &max77843_regmap_config);
@@ -123,7 +125,7 @@ static int max77843_probe(struct i2c_client *i2c,
123 125
124 ret = regmap_add_irq_chip(max77843->regmap, max77843->irq, 126 ret = regmap_add_irq_chip(max77843->regmap, max77843->irq,
125 IRQF_TRIGGER_LOW | IRQF_ONESHOT | IRQF_SHARED, 127 IRQF_TRIGGER_LOW | IRQF_ONESHOT | IRQF_SHARED,
126 0, &max77843_irq_chip, &max77843->irq_data); 128 0, &max77843_irq_chip, &max77843->irq_data_topsys);
127 if (ret) { 129 if (ret) {
128 dev_err(&i2c->dev, "Failed to add TOPSYS IRQ chip\n"); 130 dev_err(&i2c->dev, "Failed to add TOPSYS IRQ chip\n");
129 return ret; 131 return ret;
@@ -164,18 +166,18 @@ static int max77843_probe(struct i2c_client *i2c,
164 return 0; 166 return 0;
165 167
166err_pmic_id: 168err_pmic_id:
167 regmap_del_irq_chip(max77843->irq, max77843->irq_data); 169 regmap_del_irq_chip(max77843->irq, max77843->irq_data_topsys);
168 170
169 return ret; 171 return ret;
170} 172}
171 173
172static int max77843_remove(struct i2c_client *i2c) 174static int max77843_remove(struct i2c_client *i2c)
173{ 175{
174 struct max77843 *max77843 = i2c_get_clientdata(i2c); 176 struct max77693_dev *max77843 = i2c_get_clientdata(i2c);
175 177
176 mfd_remove_devices(max77843->dev); 178 mfd_remove_devices(max77843->dev);
177 179
178 regmap_del_irq_chip(max77843->irq, max77843->irq_data); 180 regmap_del_irq_chip(max77843->irq, max77843->irq_data_topsys);
179 181
180 i2c_unregister_device(max77843->i2c_chg); 182 i2c_unregister_device(max77843->i2c_chg);
181 183
@@ -188,7 +190,7 @@ static const struct of_device_id max77843_dt_match[] = {
188}; 190};
189 191
190static const struct i2c_device_id max77843_id[] = { 192static const struct i2c_device_id max77843_id[] = {
191 { "max77843", }, 193 { "max77843", TYPE_MAX77843, },
192 { }, 194 { },
193}; 195};
194MODULE_DEVICE_TABLE(i2c, max77843_id); 196MODULE_DEVICE_TABLE(i2c, max77843_id);
@@ -196,7 +198,7 @@ MODULE_DEVICE_TABLE(i2c, max77843_id);
196static int __maybe_unused max77843_suspend(struct device *dev) 198static int __maybe_unused max77843_suspend(struct device *dev)
197{ 199{
198 struct i2c_client *i2c = container_of(dev, struct i2c_client, dev); 200 struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
199 struct max77843 *max77843 = i2c_get_clientdata(i2c); 201 struct max77693_dev *max77843 = i2c_get_clientdata(i2c);
200 202
201 disable_irq(max77843->irq); 203 disable_irq(max77843->irq);
202 if (device_may_wakeup(dev)) 204 if (device_may_wakeup(dev))
@@ -208,7 +210,7 @@ static int __maybe_unused max77843_suspend(struct device *dev)
208static int __maybe_unused max77843_resume(struct device *dev) 210static int __maybe_unused max77843_resume(struct device *dev)
209{ 211{
210 struct i2c_client *i2c = container_of(dev, struct i2c_client, dev); 212 struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
211 struct max77843 *max77843 = i2c_get_clientdata(i2c); 213 struct max77693_dev *max77843 = i2c_get_clientdata(i2c);
212 214
213 if (device_may_wakeup(dev)) 215 if (device_may_wakeup(dev))
214 disable_irq_wake(max77843->irq); 216 disable_irq_wake(max77843->irq);
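Note: the max77693/max77843 hunks above converge both drivers on a single shared device structure (struct max77693_dev), with the chip variant carried in a type field filled from the i2c_device_id driver_data and the per-block I2C clients renamed to i2c_muic/i2c_haptic/irq_data_chg. A minimal sketch of that pattern, with hypothetical names standing in for the kernel structures:

#include <stdio.h>

enum chip_type { TYPE_MAX77693, TYPE_MAX77843 };

/* One state struct shared by both chip drivers; the type field is what
 * lets common code tell the variants apart at run time. */
struct shared_dev {
	enum chip_type type;
	int irq;
};

static void probe(struct shared_dev *dev, unsigned long id_driver_data, int irq)
{
	dev->type = (enum chip_type)id_driver_data;	/* max77843->type = id->driver_data */
	dev->irq = irq;
	printf("probed variant %s\n",
	       dev->type == TYPE_MAX77843 ? "max77843" : "max77693");
}

int main(void)
{
	struct shared_dev dev;

	probe(&dev, TYPE_MAX77843, 42);
	return 0;
}
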
diff --git a/drivers/mfd/stmpe-i2c.c b/drivers/mfd/stmpe-i2c.c
index 5c054031c3f8..e14c8c9d189b 100644
--- a/drivers/mfd/stmpe-i2c.c
+++ b/drivers/mfd/stmpe-i2c.c
@@ -6,7 +6,7 @@
6 * 6 *
7 * License Terms: GNU General Public License, version 2 7 * License Terms: GNU General Public License, version 2
8 * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson 8 * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
9 * Author: Viresh Kumar <viresh.linux@gmail.com> for ST Microelectronics 9 * Author: Viresh Kumar <vireshk@kernel.org> for ST Microelectronics
10 */ 10 */
11 11
12#include <linux/i2c.h> 12#include <linux/i2c.h>
diff --git a/drivers/mfd/stmpe-spi.c b/drivers/mfd/stmpe-spi.c
index a81badbaa917..6fdb30e84a2b 100644
--- a/drivers/mfd/stmpe-spi.c
+++ b/drivers/mfd/stmpe-spi.c
@@ -4,7 +4,7 @@
4 * Copyright (C) ST Microelectronics SA 2011 4 * Copyright (C) ST Microelectronics SA 2011
5 * 5 *
6 * License Terms: GNU General Public License, version 2 6 * License Terms: GNU General Public License, version 2
7 * Author: Viresh Kumar <viresh.linux@gmail.com> for ST Microelectronics 7 * Author: Viresh Kumar <vireshk@kernel.org> for ST Microelectronics
8 */ 8 */
9 9
10#include <linux/spi/spi.h> 10#include <linux/spi/spi.h>
@@ -146,4 +146,4 @@ module_exit(stmpe_exit);
146 146
147MODULE_LICENSE("GPL v2"); 147MODULE_LICENSE("GPL v2");
148MODULE_DESCRIPTION("STMPE MFD SPI Interface Driver"); 148MODULE_DESCRIPTION("STMPE MFD SPI Interface Driver");
149MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>"); 149MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 2d3db81be099..6ded3dc36644 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -438,9 +438,6 @@ static ssize_t at24_bin_write(struct file *filp, struct kobject *kobj,
438{ 438{
439 struct at24_data *at24; 439 struct at24_data *at24;
440 440
441 if (unlikely(off >= attr->size))
442 return -EFBIG;
443
444 at24 = dev_get_drvdata(container_of(kobj, struct device, kobj)); 441 at24 = dev_get_drvdata(container_of(kobj, struct device, kobj));
445 return at24_write(at24, buf, off, count); 442 return at24_write(at24, buf, off, count);
446} 443}
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 8eb0a9500a90..e9513d651cd3 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -682,7 +682,7 @@ int mei_register(struct mei_device *dev, struct device *parent)
682 /* Fill in the data structures */ 682 /* Fill in the data structures */
683 devno = MKDEV(MAJOR(mei_devt), dev->minor); 683 devno = MKDEV(MAJOR(mei_devt), dev->minor);
684 cdev_init(&dev->cdev, &mei_fops); 684 cdev_init(&dev->cdev, &mei_fops);
685 dev->cdev.owner = mei_fops.owner; 685 dev->cdev.owner = parent->driver->owner;
686 686
687 /* Add the device */ 687 /* Add the device */
688 ret = cdev_add(&dev->cdev, devno, 1); 688 ret = cdev_add(&dev->cdev, devno, 1);
diff --git a/drivers/misc/mic/scif/scif_nodeqp.c b/drivers/misc/mic/scif/scif_nodeqp.c
index 41e3bdb10061..6dfdae3452d6 100644
--- a/drivers/misc/mic/scif/scif_nodeqp.c
+++ b/drivers/misc/mic/scif/scif_nodeqp.c
@@ -357,7 +357,7 @@ static void scif_p2p_freesg(struct scatterlist *sg)
357} 357}
358 358
359static struct scatterlist * 359static struct scatterlist *
360scif_p2p_setsg(void __iomem *va, int page_size, int page_cnt) 360scif_p2p_setsg(phys_addr_t pa, int page_size, int page_cnt)
361{ 361{
362 struct scatterlist *sg; 362 struct scatterlist *sg;
363 struct page *page; 363 struct page *page;
@@ -368,16 +368,11 @@ scif_p2p_setsg(void __iomem *va, int page_size, int page_cnt)
368 return NULL; 368 return NULL;
369 sg_init_table(sg, page_cnt); 369 sg_init_table(sg, page_cnt);
370 for (i = 0; i < page_cnt; i++) { 370 for (i = 0; i < page_cnt; i++) {
371 page = vmalloc_to_page((void __force *)va); 371 page = pfn_to_page(pa >> PAGE_SHIFT);
372 if (!page)
373 goto p2p_sg_err;
374 sg_set_page(&sg[i], page, page_size, 0); 372 sg_set_page(&sg[i], page, page_size, 0);
375 va += page_size; 373 pa += page_size;
376 } 374 }
377 return sg; 375 return sg;
378p2p_sg_err:
379 kfree(sg);
380 return NULL;
381} 376}
382 377
383/* Init p2p mappings required to access peerdev from scifdev */ 378/* Init p2p mappings required to access peerdev from scifdev */
@@ -395,14 +390,14 @@ scif_init_p2p_info(struct scif_dev *scifdev, struct scif_dev *peerdev)
395 p2p = kzalloc(sizeof(*p2p), GFP_KERNEL); 390 p2p = kzalloc(sizeof(*p2p), GFP_KERNEL);
396 if (!p2p) 391 if (!p2p)
397 return NULL; 392 return NULL;
398 p2p->ppi_sg[SCIF_PPI_MMIO] = scif_p2p_setsg(psdev->mmio->va, 393 p2p->ppi_sg[SCIF_PPI_MMIO] = scif_p2p_setsg(psdev->mmio->pa,
399 PAGE_SIZE, num_mmio_pages); 394 PAGE_SIZE, num_mmio_pages);
400 if (!p2p->ppi_sg[SCIF_PPI_MMIO]) 395 if (!p2p->ppi_sg[SCIF_PPI_MMIO])
401 goto free_p2p; 396 goto free_p2p;
402 p2p->sg_nentries[SCIF_PPI_MMIO] = num_mmio_pages; 397 p2p->sg_nentries[SCIF_PPI_MMIO] = num_mmio_pages;
403 sg_page_shift = get_order(min(psdev->aper->len, (u64)(1 << 30))); 398 sg_page_shift = get_order(min(psdev->aper->len, (u64)(1 << 30)));
404 num_aper_chunks = num_aper_pages >> (sg_page_shift - PAGE_SHIFT); 399 num_aper_chunks = num_aper_pages >> (sg_page_shift - PAGE_SHIFT);
405 p2p->ppi_sg[SCIF_PPI_APER] = scif_p2p_setsg(psdev->aper->va, 400 p2p->ppi_sg[SCIF_PPI_APER] = scif_p2p_setsg(psdev->aper->pa,
406 1 << sg_page_shift, 401 1 << sg_page_shift,
407 num_aper_chunks); 402 num_aper_chunks);
408 p2p->sg_nentries[SCIF_PPI_APER] = num_aper_chunks; 403 p2p->sg_nentries[SCIF_PPI_APER] = num_aper_chunks;
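Note: scif_p2p_setsg() above switches from translating an ioremapped virtual address with vmalloc_to_page() to walking the peer's physical address range directly. A stand-alone model of the new loop, where the pfn printout stands in for pfn_to_page()/sg_set_page():

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12	/* 4 KiB pages, illustrative only */

/* Record one scatterlist entry per fixed-size chunk of the physical
 * window, advancing the physical address rather than a mapped VA. */
static void p2p_setsg(uint64_t pa, uint64_t chunk_size, int chunk_cnt)
{
	for (int i = 0; i < chunk_cnt; i++) {
		printf("entry %d: pfn %llu, len %llu\n", i,
		       (unsigned long long)(pa >> PAGE_SHIFT),
		       (unsigned long long)chunk_size);
		pa += chunk_size;
	}
}

int main(void)
{
	p2p_setsg(0x100000000ULL, 1u << 20, 4);	/* 4 chunks of 1 MiB */
	return 0;
}
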
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index c9c3d20b784b..a1b820fcb2a6 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -208,6 +208,8 @@ static ssize_t power_ro_lock_show(struct device *dev,
208 208
209 ret = snprintf(buf, PAGE_SIZE, "%d\n", locked); 209 ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);
210 210
211 mmc_blk_put(md);
212
211 return ret; 213 return ret;
212} 214}
213 215
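Note: the mmc block hunk above plugs a reference leak: power_ro_lock_show() returned on the success path without dropping the reference on md it had taken earlier in the function, and the added mmc_blk_put(md) restores the balance. The shape of the fix, as a tiny sketch with hypothetical get()/put() helpers:

#include <stdio.h>

static int refcount;

static void blk_get(void) { refcount++; }
static void blk_put(void) { refcount--; }

/* Every sysfs show handler that takes a reference must drop it on every
 * return path, including the normal one. */
static int ro_lock_show(char *buf, unsigned long len)
{
	int ret;

	blk_get();
	ret = snprintf(buf, len, "%d\n", 0);
	blk_put();			/* the line the fix adds */
	return ret;
}

int main(void)
{
	char buf[8];

	ro_lock_show(buf, sizeof(buf));
	printf("refcount after show: %d\n", refcount);	/* 0 */
	return 0;
}
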
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index fd9a58e216a5..6a0f9c79be26 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -779,6 +779,7 @@ config MMC_TOSHIBA_PCI
779 779
780config MMC_MTK 780config MMC_MTK
781 tristate "MediaTek SD/MMC Card Interface support" 781 tristate "MediaTek SD/MMC Card Interface support"
782 depends on HAS_DMA
782 help 783 help
783 This selects the MediaTek(R) Secure digital and Multimedia card Interface. 784 This selects the MediaTek(R) Secure digital and Multimedia card Interface.
784 If you have a machine with a integrated SD/MMC card reader, say Y or M here. 785 If you have a machine with a integrated SD/MMC card reader, say Y or M here.
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index b2b411da297b..4d1203236890 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -1062,9 +1062,14 @@ static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status)
1062 1062
1063 if (status & (CTO_EN | CCRC_EN)) 1063 if (status & (CTO_EN | CCRC_EN))
1064 end_cmd = 1; 1064 end_cmd = 1;
1065 if (host->data || host->response_busy) {
1066 end_trans = !end_cmd;
1067 host->response_busy = 0;
1068 }
1065 if (status & (CTO_EN | DTO_EN)) 1069 if (status & (CTO_EN | DTO_EN))
1066 hsmmc_command_incomplete(host, -ETIMEDOUT, end_cmd); 1070 hsmmc_command_incomplete(host, -ETIMEDOUT, end_cmd);
1067 else if (status & (CCRC_EN | DCRC_EN)) 1071 else if (status & (CCRC_EN | DCRC_EN | DEB_EN | CEB_EN |
1072 BADA_EN))
1068 hsmmc_command_incomplete(host, -EILSEQ, end_cmd); 1073 hsmmc_command_incomplete(host, -EILSEQ, end_cmd);
1069 1074
1070 if (status & ACE_EN) { 1075 if (status & ACE_EN) {
@@ -1081,10 +1086,6 @@ static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status)
1081 } 1086 }
1082 dev_dbg(mmc_dev(host->mmc), "AC12 err: 0x%x\n", ac12); 1087 dev_dbg(mmc_dev(host->mmc), "AC12 err: 0x%x\n", ac12);
1083 } 1088 }
1084 if (host->data || host->response_busy) {
1085 end_trans = !end_cmd;
1086 host->response_busy = 0;
1087 }
1088 } 1089 }
1089 1090
1090 OMAP_HSMMC_WRITE(host->base, STAT, status); 1091 OMAP_HSMMC_WRITE(host->base, STAT, status);
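Note: in the omap_hsmmc interrupt path above, the end_trans/response_busy bookkeeping moves ahead of the error branches so the timeout and CRC handling always sees it, and DEB/CEB/BADA errors are now reported as -EILSEQ like the CRC cases. A stand-alone model of the new dispatch; the flag bit positions here are illustrative, not the controller's register layout.

#include <stdbool.h>
#include <stdio.h>

#define CTO_EN  (1u << 0)	/* command timeout */
#define CCRC_EN (1u << 1)	/* command CRC */
#define DTO_EN  (1u << 2)	/* data timeout */
#define DCRC_EN (1u << 3)	/* data CRC */
#define DEB_EN  (1u << 4)	/* data end-bit */
#define CEB_EN  (1u << 5)	/* command end-bit */
#define BADA_EN (1u << 6)	/* bad access */

static void do_irq(unsigned int status, bool have_data_or_busy)
{
	bool end_cmd = status & (CTO_EN | CCRC_EN);
	bool end_trans = false;

	if (have_data_or_busy)
		end_trans = !end_cmd;	/* now decided before the error branches */

	if (status & (CTO_EN | DTO_EN))
		printf("complete with -ETIMEDOUT (end_cmd=%d end_trans=%d)\n",
		       end_cmd, end_trans);
	else if (status & (CCRC_EN | DCRC_EN | DEB_EN | CEB_EN | BADA_EN))
		printf("complete with -EILSEQ (end_cmd=%d end_trans=%d)\n",
		       end_cmd, end_trans);
}

int main(void)
{
	do_irq(DTO_EN, true);	/* data timeout with a transfer in flight */
	do_irq(CCRC_EN, false);	/* command CRC error, no data phase */
	return 0;
}
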
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index faf0cb910c96..c6b9f6492e1a 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -581,13 +581,8 @@ static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg)
581static unsigned int esdhc_pltfm_get_max_clock(struct sdhci_host *host) 581static unsigned int esdhc_pltfm_get_max_clock(struct sdhci_host *host)
582{ 582{
583 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 583 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
584 struct pltfm_imx_data *imx_data = pltfm_host->priv;
585 struct esdhc_platform_data *boarddata = &imx_data->boarddata;
586 584
587 if (boarddata->f_max && (boarddata->f_max < pltfm_host->clock)) 585 return pltfm_host->clock;
588 return boarddata->f_max;
589 else
590 return pltfm_host->clock;
591} 586}
592 587
593static unsigned int esdhc_pltfm_get_min_clock(struct sdhci_host *host) 588static unsigned int esdhc_pltfm_get_min_clock(struct sdhci_host *host)
@@ -878,34 +873,19 @@ static const struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = {
878static int 873static int
879sdhci_esdhc_imx_probe_dt(struct platform_device *pdev, 874sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
880 struct sdhci_host *host, 875 struct sdhci_host *host,
881 struct esdhc_platform_data *boarddata) 876 struct pltfm_imx_data *imx_data)
882{ 877{
883 struct device_node *np = pdev->dev.of_node; 878 struct device_node *np = pdev->dev.of_node;
884 879 struct esdhc_platform_data *boarddata = &imx_data->boarddata;
885 if (!np) 880 int ret;
886 return -ENODEV;
887
888 if (of_get_property(np, "non-removable", NULL))
889 boarddata->cd_type = ESDHC_CD_PERMANENT;
890
891 if (of_get_property(np, "fsl,cd-controller", NULL))
892 boarddata->cd_type = ESDHC_CD_CONTROLLER;
893 881
894 if (of_get_property(np, "fsl,wp-controller", NULL)) 882 if (of_get_property(np, "fsl,wp-controller", NULL))
895 boarddata->wp_type = ESDHC_WP_CONTROLLER; 883 boarddata->wp_type = ESDHC_WP_CONTROLLER;
896 884
897 boarddata->cd_gpio = of_get_named_gpio(np, "cd-gpios", 0);
898 if (gpio_is_valid(boarddata->cd_gpio))
899 boarddata->cd_type = ESDHC_CD_GPIO;
900
901 boarddata->wp_gpio = of_get_named_gpio(np, "wp-gpios", 0); 885 boarddata->wp_gpio = of_get_named_gpio(np, "wp-gpios", 0);
902 if (gpio_is_valid(boarddata->wp_gpio)) 886 if (gpio_is_valid(boarddata->wp_gpio))
903 boarddata->wp_type = ESDHC_WP_GPIO; 887 boarddata->wp_type = ESDHC_WP_GPIO;
904 888
905 of_property_read_u32(np, "bus-width", &boarddata->max_bus_width);
906
907 of_property_read_u32(np, "max-frequency", &boarddata->f_max);
908
909 if (of_find_property(np, "no-1-8-v", NULL)) 889 if (of_find_property(np, "no-1-8-v", NULL))
910 boarddata->support_vsel = false; 890 boarddata->support_vsel = false;
911 else 891 else
@@ -916,29 +896,119 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
916 896
917 mmc_of_parse_voltage(np, &host->ocr_mask); 897 mmc_of_parse_voltage(np, &host->ocr_mask);
918 898
899 /* sdr50 and sdr104 needs work on 1.8v signal voltage */
900 if ((boarddata->support_vsel) && esdhc_is_usdhc(imx_data) &&
901 !IS_ERR(imx_data->pins_default)) {
902 imx_data->pins_100mhz = pinctrl_lookup_state(imx_data->pinctrl,
903 ESDHC_PINCTRL_STATE_100MHZ);
904 imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl,
905 ESDHC_PINCTRL_STATE_200MHZ);
906 if (IS_ERR(imx_data->pins_100mhz) ||
907 IS_ERR(imx_data->pins_200mhz)) {
908 dev_warn(mmc_dev(host->mmc),
909 "could not get ultra high speed state, work on normal mode\n");
910 /*
911 * fall back to not support uhs by specify no 1.8v quirk
912 */
913 host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
914 }
915 } else {
916 host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
917 }
918
919 /* call to generic mmc_of_parse to support additional capabilities */ 919 /* call to generic mmc_of_parse to support additional capabilities */
920 return mmc_of_parse(host->mmc); 920 ret = mmc_of_parse(host->mmc);
921 if (ret)
922 return ret;
923
924 if (!IS_ERR_VALUE(mmc_gpio_get_cd(host->mmc)))
925 host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
926
927 return 0;
921} 928}
922#else 929#else
923static inline int 930static inline int
924sdhci_esdhc_imx_probe_dt(struct platform_device *pdev, 931sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
925 struct sdhci_host *host, 932 struct sdhci_host *host,
926 struct esdhc_platform_data *boarddata) 933 struct pltfm_imx_data *imx_data)
927{ 934{
928 return -ENODEV; 935 return -ENODEV;
929} 936}
930#endif 937#endif
931 938
939static int sdhci_esdhc_imx_probe_nondt(struct platform_device *pdev,
940 struct sdhci_host *host,
941 struct pltfm_imx_data *imx_data)
942{
943 struct esdhc_platform_data *boarddata = &imx_data->boarddata;
944 int err;
945
946 if (!host->mmc->parent->platform_data) {
947 dev_err(mmc_dev(host->mmc), "no board data!\n");
948 return -EINVAL;
949 }
950
951 imx_data->boarddata = *((struct esdhc_platform_data *)
952 host->mmc->parent->platform_data);
953 /* write_protect */
954 if (boarddata->wp_type == ESDHC_WP_GPIO) {
955 err = mmc_gpio_request_ro(host->mmc, boarddata->wp_gpio);
956 if (err) {
957 dev_err(mmc_dev(host->mmc),
958 "failed to request write-protect gpio!\n");
959 return err;
960 }
961 host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
962 }
963
964 /* card_detect */
965 switch (boarddata->cd_type) {
966 case ESDHC_CD_GPIO:
967 err = mmc_gpio_request_cd(host->mmc, boarddata->cd_gpio, 0);
968 if (err) {
969 dev_err(mmc_dev(host->mmc),
970 "failed to request card-detect gpio!\n");
971 return err;
972 }
973 /* fall through */
974
975 case ESDHC_CD_CONTROLLER:
976 /* we have a working card_detect back */
977 host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
978 break;
979
980 case ESDHC_CD_PERMANENT:
981 host->mmc->caps |= MMC_CAP_NONREMOVABLE;
982 break;
983
984 case ESDHC_CD_NONE:
985 break;
986 }
987
988 switch (boarddata->max_bus_width) {
989 case 8:
990 host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA;
991 break;
992 case 4:
993 host->mmc->caps |= MMC_CAP_4_BIT_DATA;
994 break;
995 case 1:
996 default:
997 host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA;
998 break;
999 }
1000
1001 return 0;
1002}
1003
932static int sdhci_esdhc_imx_probe(struct platform_device *pdev) 1004static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
933{ 1005{
934 const struct of_device_id *of_id = 1006 const struct of_device_id *of_id =
935 of_match_device(imx_esdhc_dt_ids, &pdev->dev); 1007 of_match_device(imx_esdhc_dt_ids, &pdev->dev);
936 struct sdhci_pltfm_host *pltfm_host; 1008 struct sdhci_pltfm_host *pltfm_host;
937 struct sdhci_host *host; 1009 struct sdhci_host *host;
938 struct esdhc_platform_data *boarddata;
939 int err; 1010 int err;
940 struct pltfm_imx_data *imx_data; 1011 struct pltfm_imx_data *imx_data;
941 bool dt = true;
942 1012
943 host = sdhci_pltfm_init(pdev, &sdhci_esdhc_imx_pdata, 0); 1013 host = sdhci_pltfm_init(pdev, &sdhci_esdhc_imx_pdata, 0);
944 if (IS_ERR(host)) 1014 if (IS_ERR(host))
@@ -1030,84 +1100,12 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
1030 if (imx_data->socdata->flags & ESDHC_FLAG_ERR004536) 1100 if (imx_data->socdata->flags & ESDHC_FLAG_ERR004536)
1031 host->quirks |= SDHCI_QUIRK_BROKEN_ADMA; 1101 host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
1032 1102
1033 boarddata = &imx_data->boarddata; 1103 if (of_id)
1034 if (sdhci_esdhc_imx_probe_dt(pdev, host, boarddata) < 0) { 1104 err = sdhci_esdhc_imx_probe_dt(pdev, host, imx_data);
1035 if (!host->mmc->parent->platform_data) { 1105 else
1036 dev_err(mmc_dev(host->mmc), "no board data!\n"); 1106 err = sdhci_esdhc_imx_probe_nondt(pdev, host, imx_data);
1037 err = -EINVAL; 1107 if (err)
1038 goto disable_clk; 1108 goto disable_clk;
1039 }
1040 imx_data->boarddata = *((struct esdhc_platform_data *)
1041 host->mmc->parent->platform_data);
1042 dt = false;
1043 }
1044 /* write_protect */
1045 if (boarddata->wp_type == ESDHC_WP_GPIO && !dt) {
1046 err = mmc_gpio_request_ro(host->mmc, boarddata->wp_gpio);
1047 if (err) {
1048 dev_err(mmc_dev(host->mmc),
1049 "failed to request write-protect gpio!\n");
1050 goto disable_clk;
1051 }
1052 host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
1053 }
1054
1055 /* card_detect */
1056 switch (boarddata->cd_type) {
1057 case ESDHC_CD_GPIO:
1058 if (dt)
1059 break;
1060 err = mmc_gpio_request_cd(host->mmc, boarddata->cd_gpio, 0);
1061 if (err) {
1062 dev_err(mmc_dev(host->mmc),
1063 "failed to request card-detect gpio!\n");
1064 goto disable_clk;
1065 }
1066 /* fall through */
1067
1068 case ESDHC_CD_CONTROLLER:
1069 /* we have a working card_detect back */
1070 host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
1071 break;
1072
1073 case ESDHC_CD_PERMANENT:
1074 host->mmc->caps |= MMC_CAP_NONREMOVABLE;
1075 break;
1076
1077 case ESDHC_CD_NONE:
1078 break;
1079 }
1080
1081 switch (boarddata->max_bus_width) {
1082 case 8:
1083 host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA;
1084 break;
1085 case 4:
1086 host->mmc->caps |= MMC_CAP_4_BIT_DATA;
1087 break;
1088 case 1:
1089 default:
1090 host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA;
1091 break;
1092 }
1093
1094 /* sdr50 and sdr104 needs work on 1.8v signal voltage */
1095 if ((boarddata->support_vsel) && esdhc_is_usdhc(imx_data) &&
1096 !IS_ERR(imx_data->pins_default)) {
1097 imx_data->pins_100mhz = pinctrl_lookup_state(imx_data->pinctrl,
1098 ESDHC_PINCTRL_STATE_100MHZ);
1099 imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl,
1100 ESDHC_PINCTRL_STATE_200MHZ);
1101 if (IS_ERR(imx_data->pins_100mhz) ||
1102 IS_ERR(imx_data->pins_200mhz)) {
1103 dev_warn(mmc_dev(host->mmc),
1104 "could not get ultra high speed state, work on normal mode\n");
1105 /* fall back to not support uhs by specify no 1.8v quirk */
1106 host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
1107 }
1108 } else {
1109 host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
1110 }
1111 1109
1112 err = sdhci_add_host(host); 1110 err = sdhci_add_host(host);
1113 if (err) 1111 if (err)
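Note: the esdhc-imx probe above is split into one helper per firmware interface: DT systems go through sdhci_esdhc_imx_probe_dt() (which now also carries the pinctrl/UHS fallback and card-detect handling), non-DT systems through the new sdhci_esdhc_imx_probe_nondt(). The resulting dispatch, as a minimal sketch:

#include <stdbool.h>
#include <stdio.h>

static int probe_dt(void)
{
	printf("parse DT: wp-gpios, no-1-8-v, UHS pinctrl, mmc_of_parse\n");
	return 0;
}

static int probe_nondt(void)
{
	printf("use platform data: wp/cd gpios, bus width, caps\n");
	return 0;
}

/* Exactly one board-configuration path runs, chosen by whether the device
 * matched an OF id, instead of trying DT and falling back inline. */
static int esdhc_probe(bool have_of_match)
{
	return have_of_match ? probe_dt() : probe_nondt();
}

int main(void)
{
	return esdhc_probe(true);
}
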
diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h
index 3497cfaf683c..a870c42731d7 100644
--- a/drivers/mmc/host/sdhci-esdhc.h
+++ b/drivers/mmc/host/sdhci-esdhc.h
@@ -45,6 +45,6 @@
45#define ESDHC_DMA_SYSCTL 0x40c 45#define ESDHC_DMA_SYSCTL 0x40c
46#define ESDHC_DMA_SNOOP 0x00000040 46#define ESDHC_DMA_SNOOP 0x00000040
47 47
48#define ESDHC_HOST_CONTROL_RES 0x05 48#define ESDHC_HOST_CONTROL_RES 0x01
49 49
50#endif /* _DRIVERS_MMC_SDHCI_ESDHC_H */ 50#endif /* _DRIVERS_MMC_SDHCI_ESDHC_H */
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index 9cd5fc62f130..946d37f94a31 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -411,6 +411,7 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
411 goto err_of_parse; 411 goto err_of_parse;
412 sdhci_get_of_property(pdev); 412 sdhci_get_of_property(pdev);
413 pdata = pxav3_get_mmc_pdata(dev); 413 pdata = pxav3_get_mmc_pdata(dev);
414 pdev->dev.platform_data = pdata;
414 } else if (pdata) { 415 } else if (pdata) {
415 /* on-chip device */ 416 /* on-chip device */
416 if (pdata->flags & PXA_FLAG_CARD_PERMANENT) 417 if (pdata->flags & PXA_FLAG_CARD_PERMANENT)
diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c
index df088343d60f..255a896769b8 100644
--- a/drivers/mmc/host/sdhci-spear.c
+++ b/drivers/mmc/host/sdhci-spear.c
@@ -4,7 +4,7 @@
4 * Support of SDHCI platform devices for spear soc family 4 * Support of SDHCI platform devices for spear soc family
5 * 5 *
6 * Copyright (C) 2010 ST Microelectronics 6 * Copyright (C) 2010 ST Microelectronics
7 * Viresh Kumar <viresh.linux@gmail.com> 7 * Viresh Kumar <vireshk@kernel.org>
8 * 8 *
9 * Inspired by sdhci-pltfm.c 9 * Inspired by sdhci-pltfm.c
10 * 10 *
@@ -211,5 +211,5 @@ static struct platform_driver sdhci_driver = {
211module_platform_driver(sdhci_driver); 211module_platform_driver(sdhci_driver);
212 212
213MODULE_DESCRIPTION("SPEAr Secure Digital Host Controller Interface driver"); 213MODULE_DESCRIPTION("SPEAr Secure Digital Host Controller Interface driver");
214MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>"); 214MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");
215MODULE_LICENSE("GPL v2"); 215MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index bc1445238fb3..1dbe93232030 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -2866,6 +2866,7 @@ int sdhci_add_host(struct sdhci_host *host)
2866 u32 max_current_caps; 2866 u32 max_current_caps;
2867 unsigned int ocr_avail; 2867 unsigned int ocr_avail;
2868 unsigned int override_timeout_clk; 2868 unsigned int override_timeout_clk;
2869 u32 max_clk;
2869 int ret; 2870 int ret;
2870 2871
2871 WARN_ON(host == NULL); 2872 WARN_ON(host == NULL);
@@ -2978,8 +2979,11 @@ int sdhci_add_host(struct sdhci_host *host)
2978 GFP_KERNEL); 2979 GFP_KERNEL);
2979 host->align_buffer = kmalloc(host->align_buffer_sz, GFP_KERNEL); 2980 host->align_buffer = kmalloc(host->align_buffer_sz, GFP_KERNEL);
2980 if (!host->adma_table || !host->align_buffer) { 2981 if (!host->adma_table || !host->align_buffer) {
2981 dma_free_coherent(mmc_dev(mmc), host->adma_table_sz, 2982 if (host->adma_table)
2982 host->adma_table, host->adma_addr); 2983 dma_free_coherent(mmc_dev(mmc),
2984 host->adma_table_sz,
2985 host->adma_table,
2986 host->adma_addr);
2983 kfree(host->align_buffer); 2987 kfree(host->align_buffer);
2984 pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n", 2988 pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
2985 mmc_hostname(mmc)); 2989 mmc_hostname(mmc));
@@ -3047,18 +3051,22 @@ int sdhci_add_host(struct sdhci_host *host)
3047 * Set host parameters. 3051 * Set host parameters.
3048 */ 3052 */
3049 mmc->ops = &sdhci_ops; 3053 mmc->ops = &sdhci_ops;
3050 mmc->f_max = host->max_clk; 3054 max_clk = host->max_clk;
3055
3051 if (host->ops->get_min_clock) 3056 if (host->ops->get_min_clock)
3052 mmc->f_min = host->ops->get_min_clock(host); 3057 mmc->f_min = host->ops->get_min_clock(host);
3053 else if (host->version >= SDHCI_SPEC_300) { 3058 else if (host->version >= SDHCI_SPEC_300) {
3054 if (host->clk_mul) { 3059 if (host->clk_mul) {
3055 mmc->f_min = (host->max_clk * host->clk_mul) / 1024; 3060 mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
3056 mmc->f_max = host->max_clk * host->clk_mul; 3061 max_clk = host->max_clk * host->clk_mul;
3057 } else 3062 } else
3058 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300; 3063 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
3059 } else 3064 } else
3060 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200; 3065 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
3061 3066
3067 if (!mmc->f_max || (mmc->f_max && (mmc->f_max > max_clk)))
3068 mmc->f_max = max_clk;
3069
3062 if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) { 3070 if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
3063 host->timeout_clk = (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >> 3071 host->timeout_clk = (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >>
3064 SDHCI_TIMEOUT_CLK_SHIFT; 3072 SDHCI_TIMEOUT_CLK_SHIFT;
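Note: the sdhci_add_host() hunk above stops overwriting a board- or DT-supplied mmc->f_max unconditionally; the supplied value is kept only when it is nonzero and no higher than what the controller can generate (the second mmc->f_max test in the new condition is redundant but harmless). A stand-alone model of that clamp:

#include <stdio.h>

/* Keep a requested maximum frequency only if it is usable; otherwise fall
 * back to the controller limit. */
static unsigned int clamp_f_max(unsigned int requested, unsigned int max_clk)
{
	if (!requested || requested > max_clk)
		return max_clk;
	return requested;
}

int main(void)
{
	printf("%u\n", clamp_f_max(0, 200000000));		/* -> 200000000 */
	printf("%u\n", clamp_f_max(50000000, 200000000));	/* -> 50000000 */
	printf("%u\n", clamp_f_max(400000000, 200000000));	/* -> 200000000 */
	return 0;
}
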
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 19eb990d398c..a98dd4f1b0e3 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -625,6 +625,23 @@ static void bond_set_dev_addr(struct net_device *bond_dev,
625 call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev); 625 call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev);
626} 626}
627 627
628static struct slave *bond_get_old_active(struct bonding *bond,
629 struct slave *new_active)
630{
631 struct slave *slave;
632 struct list_head *iter;
633
634 bond_for_each_slave(bond, slave, iter) {
635 if (slave == new_active)
636 continue;
637
638 if (ether_addr_equal(bond->dev->dev_addr, slave->dev->dev_addr))
639 return slave;
640 }
641
642 return NULL;
643}
644
628/* bond_do_fail_over_mac 645/* bond_do_fail_over_mac
629 * 646 *
630 * Perform special MAC address swapping for fail_over_mac settings 647 * Perform special MAC address swapping for fail_over_mac settings
@@ -652,6 +669,9 @@ static void bond_do_fail_over_mac(struct bonding *bond,
652 if (!new_active) 669 if (!new_active)
653 return; 670 return;
654 671
672 if (!old_active)
673 old_active = bond_get_old_active(bond, new_active);
674
655 if (old_active) { 675 if (old_active) {
656 ether_addr_copy(tmp_mac, new_active->dev->dev_addr); 676 ether_addr_copy(tmp_mac, new_active->dev->dev_addr);
657 ether_addr_copy(saddr.sa_data, 677 ether_addr_copy(saddr.sa_data,
@@ -689,40 +709,57 @@ out:
689 709
690} 710}
691 711
692static bool bond_should_change_active(struct bonding *bond) 712static struct slave *bond_choose_primary_or_current(struct bonding *bond)
693{ 713{
694 struct slave *prim = rtnl_dereference(bond->primary_slave); 714 struct slave *prim = rtnl_dereference(bond->primary_slave);
695 struct slave *curr = rtnl_dereference(bond->curr_active_slave); 715 struct slave *curr = rtnl_dereference(bond->curr_active_slave);
696 716
697 if (!prim || !curr || curr->link != BOND_LINK_UP) 717 if (!prim || prim->link != BOND_LINK_UP) {
698 return true; 718 if (!curr || curr->link != BOND_LINK_UP)
719 return NULL;
720 return curr;
721 }
722
699 if (bond->force_primary) { 723 if (bond->force_primary) {
700 bond->force_primary = false; 724 bond->force_primary = false;
701 return true; 725 return prim;
726 }
727
728 if (!curr || curr->link != BOND_LINK_UP)
729 return prim;
730
731 /* At this point, prim and curr are both up */
732 switch (bond->params.primary_reselect) {
733 case BOND_PRI_RESELECT_ALWAYS:
734 return prim;
735 case BOND_PRI_RESELECT_BETTER:
736 if (prim->speed < curr->speed)
737 return curr;
738 if (prim->speed == curr->speed && prim->duplex <= curr->duplex)
739 return curr;
740 return prim;
741 case BOND_PRI_RESELECT_FAILURE:
742 return curr;
743 default:
744 netdev_err(bond->dev, "impossible primary_reselect %d\n",
745 bond->params.primary_reselect);
746 return curr;
702 } 747 }
703 if (bond->params.primary_reselect == BOND_PRI_RESELECT_BETTER &&
704 (prim->speed < curr->speed ||
705 (prim->speed == curr->speed && prim->duplex <= curr->duplex)))
706 return false;
707 if (bond->params.primary_reselect == BOND_PRI_RESELECT_FAILURE)
708 return false;
709 return true;
710} 748}
711 749
712/** 750/**
713 * find_best_interface - select the best available slave to be the active one 751 * bond_find_best_slave - select the best available slave to be the active one
714 * @bond: our bonding struct 752 * @bond: our bonding struct
715 */ 753 */
716static struct slave *bond_find_best_slave(struct bonding *bond) 754static struct slave *bond_find_best_slave(struct bonding *bond)
717{ 755{
718 struct slave *slave, *bestslave = NULL, *primary; 756 struct slave *slave, *bestslave = NULL;
719 struct list_head *iter; 757 struct list_head *iter;
720 int mintime = bond->params.updelay; 758 int mintime = bond->params.updelay;
721 759
722 primary = rtnl_dereference(bond->primary_slave); 760 slave = bond_choose_primary_or_current(bond);
723 if (primary && primary->link == BOND_LINK_UP && 761 if (slave)
724 bond_should_change_active(bond)) 762 return slave;
725 return primary;
726 763
727 bond_for_each_slave(bond, slave, iter) { 764 bond_for_each_slave(bond, slave, iter) {
728 if (slave->link == BOND_LINK_UP) 765 if (slave->link == BOND_LINK_UP)
@@ -749,6 +786,7 @@ static bool bond_should_notify_peers(struct bonding *bond)
749 slave ? slave->dev->name : "NULL"); 786 slave ? slave->dev->name : "NULL");
750 787
751 if (!slave || !bond->send_peer_notif || 788 if (!slave || !bond->send_peer_notif ||
789 !netif_carrier_ok(bond->dev) ||
752 test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state)) 790 test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
753 return false; 791 return false;
754 792
@@ -1708,9 +1746,16 @@ err_free:
1708 1746
1709err_undo_flags: 1747err_undo_flags:
1710 /* Enslave of first slave has failed and we need to fix master's mac */ 1748 /* Enslave of first slave has failed and we need to fix master's mac */
1711 if (!bond_has_slaves(bond) && 1749 if (!bond_has_slaves(bond)) {
1712 ether_addr_equal_64bits(bond_dev->dev_addr, slave_dev->dev_addr)) 1750 if (ether_addr_equal_64bits(bond_dev->dev_addr,
1713 eth_hw_addr_random(bond_dev); 1751 slave_dev->dev_addr))
1752 eth_hw_addr_random(bond_dev);
1753 if (bond_dev->type != ARPHRD_ETHER) {
1754 ether_setup(bond_dev);
1755 bond_dev->flags |= IFF_MASTER;
1756 bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1757 }
1758 }
1714 1759
1715 return res; 1760 return res;
1716} 1761}
@@ -1899,6 +1944,7 @@ static int bond_release_and_destroy(struct net_device *bond_dev,
1899 bond_dev->priv_flags |= IFF_DISABLE_NETPOLL; 1944 bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
1900 netdev_info(bond_dev, "Destroying bond %s\n", 1945 netdev_info(bond_dev, "Destroying bond %s\n",
1901 bond_dev->name); 1946 bond_dev->name);
1947 bond_remove_proc_entry(bond);
1902 unregister_netdevice(bond_dev); 1948 unregister_netdevice(bond_dev);
1903 } 1949 }
1904 return ret; 1950 return ret;
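
The bonding refactor above replaces the boolean bond_should_change_active() with a helper that directly returns the slave to activate. Below is a small self-contained C model of that decision order (primary link state first, then force_primary, then the primary_reselect policy); the struct and enum names only mirror the hunk, and the harness is illustrative rather than driver code.

    #include <stdio.h>

    enum { PRI_RESELECT_ALWAYS, PRI_RESELECT_BETTER, PRI_RESELECT_FAILURE };
    enum { LINK_DOWN, LINK_UP };

    struct slave { int link, speed, duplex; const char *name; };

    /* Mirrors the decision order of bond_choose_primary_or_current() */
    static struct slave *choose(struct slave *prim, struct slave *curr,
                                int force_primary, int reselect)
    {
            if (!prim || prim->link != LINK_UP)
                    return (curr && curr->link == LINK_UP) ? curr : NULL;
            if (force_primary)
                    return prim;
            if (!curr || curr->link != LINK_UP)
                    return prim;
            switch (reselect) {
            case PRI_RESELECT_ALWAYS:
                    return prim;
            case PRI_RESELECT_BETTER:
                    if (prim->speed < curr->speed ||
                        (prim->speed == curr->speed && prim->duplex <= curr->duplex))
                            return curr;
                    return prim;
            default: /* PRI_RESELECT_FAILURE */
                    return curr;
            }
    }

    int main(void)
    {
            struct slave prim = { LINK_UP, 100, 1, "prim" };
            struct slave curr = { LINK_UP, 1000, 1, "curr" };

            /* BETTER policy keeps the faster current slave */
            printf("%s\n", choose(&prim, &curr, 0, PRI_RESELECT_BETTER)->name);
            return 0;
    }
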
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index f4e40aa4d2a2..945c0955a967 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -577,10 +577,10 @@ static void at91_rx_overflow_err(struct net_device *dev)
577 577
578 cf->can_id |= CAN_ERR_CRTL; 578 cf->can_id |= CAN_ERR_CRTL;
579 cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; 579 cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
580 netif_receive_skb(skb);
581 580
582 stats->rx_packets++; 581 stats->rx_packets++;
583 stats->rx_bytes += cf->can_dlc; 582 stats->rx_bytes += cf->can_dlc;
583 netif_receive_skb(skb);
584} 584}
585 585
586/** 586/**
@@ -642,10 +642,10 @@ static void at91_read_msg(struct net_device *dev, unsigned int mb)
642 } 642 }
643 643
644 at91_read_mb(dev, mb, cf); 644 at91_read_mb(dev, mb, cf);
645 netif_receive_skb(skb);
646 645
647 stats->rx_packets++; 646 stats->rx_packets++;
648 stats->rx_bytes += cf->can_dlc; 647 stats->rx_bytes += cf->can_dlc;
648 netif_receive_skb(skb);
649 649
650 can_led_event(dev, CAN_LED_EVENT_RX); 650 can_led_event(dev, CAN_LED_EVENT_RX);
651} 651}
@@ -802,10 +802,10 @@ static int at91_poll_err(struct net_device *dev, int quota, u32 reg_sr)
802 return 0; 802 return 0;
803 803
804 at91_poll_err_frame(dev, cf, reg_sr); 804 at91_poll_err_frame(dev, cf, reg_sr);
805 netif_receive_skb(skb);
806 805
807 dev->stats.rx_packets++; 806 dev->stats.rx_packets++;
808 dev->stats.rx_bytes += cf->can_dlc; 807 dev->stats.rx_bytes += cf->can_dlc;
808 netif_receive_skb(skb);
809 809
810 return 1; 810 return 1;
811} 811}
@@ -1067,10 +1067,10 @@ static void at91_irq_err(struct net_device *dev)
1067 return; 1067 return;
1068 1068
1069 at91_irq_err_state(dev, cf, new_state); 1069 at91_irq_err_state(dev, cf, new_state);
1070 netif_rx(skb);
1071 1070
1072 dev->stats.rx_packets++; 1071 dev->stats.rx_packets++;
1073 dev->stats.rx_bytes += cf->can_dlc; 1072 dev->stats.rx_bytes += cf->can_dlc;
1073 netif_rx(skb);
1074 1074
1075 priv->can.state = new_state; 1075 priv->can.state = new_state;
1076} 1076}
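
The same ordering change recurs across the CAN drivers below: statistics are now updated before the skb is pushed to the stack. Once netif_rx()/netif_receive_skb() is called, the skb may be freed, so reading cf->can_dlc afterwards is a potential use-after-free. A minimal kernel-style fragment of the corrected pattern (not a complete handler):

    /* cf points into skb->data, so read it before the stack gets the skb */
    stats->rx_packets++;
    stats->rx_bytes += cf->can_dlc;

    /* After this call the network stack owns the skb; don't touch it again */
    netif_rx(skb);
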
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index 27ad312e7abf..57dadd52b428 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -424,10 +424,9 @@ static void bfin_can_rx(struct net_device *dev, u16 isrc)
424 cf->data[6 - i] = (6 - i) < cf->can_dlc ? (val >> 8) : 0; 424 cf->data[6 - i] = (6 - i) < cf->can_dlc ? (val >> 8) : 0;
425 } 425 }
426 426
427 netif_rx(skb);
428
429 stats->rx_packets++; 427 stats->rx_packets++;
430 stats->rx_bytes += cf->can_dlc; 428 stats->rx_bytes += cf->can_dlc;
429 netif_rx(skb);
431} 430}
432 431
433static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status) 432static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status)
@@ -508,10 +507,9 @@ static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status)
508 507
509 priv->can.state = state; 508 priv->can.state = state;
510 509
511 netif_rx(skb);
512
513 stats->rx_packets++; 510 stats->rx_packets++;
514 stats->rx_bytes += cf->can_dlc; 511 stats->rx_bytes += cf->can_dlc;
512 netif_rx(skb);
515 513
516 return 0; 514 return 0;
517} 515}
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index 041525d2595c..5d214d135332 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -592,6 +592,7 @@ static int c_can_start(struct net_device *dev)
592{ 592{
593 struct c_can_priv *priv = netdev_priv(dev); 593 struct c_can_priv *priv = netdev_priv(dev);
594 int err; 594 int err;
595 struct pinctrl *p;
595 596
596 /* basic c_can configuration */ 597 /* basic c_can configuration */
597 err = c_can_chip_config(dev); 598 err = c_can_chip_config(dev);
@@ -604,8 +605,13 @@ static int c_can_start(struct net_device *dev)
604 605
605 priv->can.state = CAN_STATE_ERROR_ACTIVE; 606 priv->can.state = CAN_STATE_ERROR_ACTIVE;
606 607
607 /* activate pins */ 608 /* Attempt to use "active" if available else use "default" */
608 pinctrl_pm_select_default_state(dev->dev.parent); 609 p = pinctrl_get_select(priv->device, "active");
610 if (!IS_ERR(p))
611 pinctrl_put(p);
612 else
613 pinctrl_pm_select_default_state(priv->device);
614
609 return 0; 615 return 0;
610} 616}
611 617
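
A short sketch of the pinctrl fallback c_can_start() now uses: try a dedicated "active" state and fall back to the runtime-PM "default" state if it is absent. The dev pointer name here is an assumption; the driver itself passes priv->device.

    struct pinctrl *p;

    /* Prefer an "active" pin state if the device tree provides one */
    p = pinctrl_get_select(dev, "active");
    if (!IS_ERR(p))
            pinctrl_put(p);                         /* state applied, drop the handle */
    else
            pinctrl_pm_select_default_state(dev);   /* fall back to "default" */
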
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c
index c11d44984036..70a8cbb29e75 100644
--- a/drivers/net/can/cc770/cc770.c
+++ b/drivers/net/can/cc770/cc770.c
@@ -504,10 +504,10 @@ static void cc770_rx(struct net_device *dev, unsigned int mo, u8 ctrl1)
504 for (i = 0; i < cf->can_dlc; i++) 504 for (i = 0; i < cf->can_dlc; i++)
505 cf->data[i] = cc770_read_reg(priv, msgobj[mo].data[i]); 505 cf->data[i] = cc770_read_reg(priv, msgobj[mo].data[i]);
506 } 506 }
507 netif_rx(skb);
508 507
509 stats->rx_packets++; 508 stats->rx_packets++;
510 stats->rx_bytes += cf->can_dlc; 509 stats->rx_bytes += cf->can_dlc;
510 netif_rx(skb);
511} 511}
512 512
513static int cc770_err(struct net_device *dev, u8 status) 513static int cc770_err(struct net_device *dev, u8 status)
@@ -584,10 +584,10 @@ static int cc770_err(struct net_device *dev, u8 status)
584 } 584 }
585 } 585 }
586 586
587 netif_rx(skb);
588 587
589 stats->rx_packets++; 588 stats->rx_packets++;
590 stats->rx_bytes += cf->can_dlc; 589 stats->rx_bytes += cf->can_dlc;
590 netif_rx(skb);
591 591
592 return 0; 592 return 0;
593} 593}
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index e9b1810d319f..aede704605c6 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -440,9 +440,6 @@ unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx)
440 struct can_frame *cf = (struct can_frame *)skb->data; 440 struct can_frame *cf = (struct can_frame *)skb->data;
441 u8 dlc = cf->can_dlc; 441 u8 dlc = cf->can_dlc;
442 442
443 if (!(skb->tstamp.tv64))
444 __net_timestamp(skb);
445
446 netif_rx(priv->echo_skb[idx]); 443 netif_rx(priv->echo_skb[idx]);
447 priv->echo_skb[idx] = NULL; 444 priv->echo_skb[idx] = NULL;
448 445
@@ -578,7 +575,6 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
578 if (unlikely(!skb)) 575 if (unlikely(!skb))
579 return NULL; 576 return NULL;
580 577
581 __net_timestamp(skb);
582 skb->protocol = htons(ETH_P_CAN); 578 skb->protocol = htons(ETH_P_CAN);
583 skb->pkt_type = PACKET_BROADCAST; 579 skb->pkt_type = PACKET_BROADCAST;
584 skb->ip_summed = CHECKSUM_UNNECESSARY; 580 skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -589,6 +585,7 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
589 585
590 can_skb_reserve(skb); 586 can_skb_reserve(skb);
591 can_skb_prv(skb)->ifindex = dev->ifindex; 587 can_skb_prv(skb)->ifindex = dev->ifindex;
588 can_skb_prv(skb)->skbcnt = 0;
592 589
593 *cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame)); 590 *cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
594 memset(*cf, 0, sizeof(struct can_frame)); 591 memset(*cf, 0, sizeof(struct can_frame));
@@ -607,7 +604,6 @@ struct sk_buff *alloc_canfd_skb(struct net_device *dev,
607 if (unlikely(!skb)) 604 if (unlikely(!skb))
608 return NULL; 605 return NULL;
609 606
610 __net_timestamp(skb);
611 skb->protocol = htons(ETH_P_CANFD); 607 skb->protocol = htons(ETH_P_CANFD);
612 skb->pkt_type = PACKET_BROADCAST; 608 skb->pkt_type = PACKET_BROADCAST;
613 skb->ip_summed = CHECKSUM_UNNECESSARY; 609 skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -618,6 +614,7 @@ struct sk_buff *alloc_canfd_skb(struct net_device *dev,
618 614
619 can_skb_reserve(skb); 615 can_skb_reserve(skb);
620 can_skb_prv(skb)->ifindex = dev->ifindex; 616 can_skb_prv(skb)->ifindex = dev->ifindex;
617 can_skb_prv(skb)->skbcnt = 0;
621 618
622 *cfd = (struct canfd_frame *)skb_put(skb, sizeof(struct canfd_frame)); 619 *cfd = (struct canfd_frame *)skb_put(skb, sizeof(struct canfd_frame));
623 memset(*cfd, 0, sizeof(struct canfd_frame)); 620 memset(*cfd, 0, sizeof(struct canfd_frame));
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 6201c5a1a884..b1e8d729851c 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -577,10 +577,10 @@ static int flexcan_poll_bus_err(struct net_device *dev, u32 reg_esr)
577 return 0; 577 return 0;
578 578
579 do_bus_err(dev, cf, reg_esr); 579 do_bus_err(dev, cf, reg_esr);
580 netif_receive_skb(skb);
581 580
582 dev->stats.rx_packets++; 581 dev->stats.rx_packets++;
583 dev->stats.rx_bytes += cf->can_dlc; 582 dev->stats.rx_bytes += cf->can_dlc;
583 netif_receive_skb(skb);
584 584
585 return 1; 585 return 1;
586} 586}
@@ -622,10 +622,9 @@ static int flexcan_poll_state(struct net_device *dev, u32 reg_esr)
622 if (unlikely(new_state == CAN_STATE_BUS_OFF)) 622 if (unlikely(new_state == CAN_STATE_BUS_OFF))
623 can_bus_off(dev); 623 can_bus_off(dev);
624 624
625 netif_receive_skb(skb);
626
627 dev->stats.rx_packets++; 625 dev->stats.rx_packets++;
628 dev->stats.rx_bytes += cf->can_dlc; 626 dev->stats.rx_bytes += cf->can_dlc;
627 netif_receive_skb(skb);
629 628
630 return 1; 629 return 1;
631} 630}
@@ -670,10 +669,10 @@ static int flexcan_read_frame(struct net_device *dev)
670 } 669 }
671 670
672 flexcan_read_fifo(dev, cf); 671 flexcan_read_fifo(dev, cf);
673 netif_receive_skb(skb);
674 672
675 stats->rx_packets++; 673 stats->rx_packets++;
676 stats->rx_bytes += cf->can_dlc; 674 stats->rx_bytes += cf->can_dlc;
675 netif_receive_skb(skb);
677 676
678 can_led_event(dev, CAN_LED_EVENT_RX); 677 can_led_event(dev, CAN_LED_EVENT_RX);
679 678
diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c
index e3d7e22a4fa0..db9538d4b358 100644
--- a/drivers/net/can/grcan.c
+++ b/drivers/net/can/grcan.c
@@ -1216,11 +1216,12 @@ static int grcan_receive(struct net_device *dev, int budget)
1216 cf->data[i] = (u8)(slot[j] >> shift); 1216 cf->data[i] = (u8)(slot[j] >> shift);
1217 } 1217 }
1218 } 1218 }
1219 netif_receive_skb(skb);
1220 1219
1221 /* Update statistics and read pointer */ 1220 /* Update statistics and read pointer */
1222 stats->rx_packets++; 1221 stats->rx_packets++;
1223 stats->rx_bytes += cf->can_dlc; 1222 stats->rx_bytes += cf->can_dlc;
1223 netif_receive_skb(skb);
1224
1224 rd = grcan_ring_add(rd, GRCAN_MSG_SIZE, dma->rx.size); 1225 rd = grcan_ring_add(rd, GRCAN_MSG_SIZE, dma->rx.size);
1225 } 1226 }
1226 1227
diff --git a/drivers/net/can/rcar_can.c b/drivers/net/can/rcar_can.c
index 7deb80dcbe8c..7bd54191f962 100644
--- a/drivers/net/can/rcar_can.c
+++ b/drivers/net/can/rcar_can.c
@@ -508,7 +508,8 @@ static int rcar_can_open(struct net_device *ndev)
508 508
509 err = clk_prepare_enable(priv->clk); 509 err = clk_prepare_enable(priv->clk);
510 if (err) { 510 if (err) {
511 netdev_err(ndev, "failed to enable periperal clock, error %d\n", 511 netdev_err(ndev,
512 "failed to enable peripheral clock, error %d\n",
512 err); 513 err);
513 goto out; 514 goto out;
514 } 515 }
@@ -526,7 +527,8 @@ static int rcar_can_open(struct net_device *ndev)
526 napi_enable(&priv->napi); 527 napi_enable(&priv->napi);
527 err = request_irq(ndev->irq, rcar_can_interrupt, 0, ndev->name, ndev); 528 err = request_irq(ndev->irq, rcar_can_interrupt, 0, ndev->name, ndev);
528 if (err) { 529 if (err) {
529 netdev_err(ndev, "error requesting interrupt %x\n", ndev->irq); 530 netdev_err(ndev, "request_irq(%d) failed, error %d\n",
531 ndev->irq, err);
530 goto out_close; 532 goto out_close;
531 } 533 }
532 can_led_event(ndev, CAN_LED_EVENT_OPEN); 534 can_led_event(ndev, CAN_LED_EVENT_OPEN);
@@ -758,8 +760,9 @@ static int rcar_can_probe(struct platform_device *pdev)
758 } 760 }
759 761
760 irq = platform_get_irq(pdev, 0); 762 irq = platform_get_irq(pdev, 0);
761 if (!irq) { 763 if (irq < 0) {
762 dev_err(&pdev->dev, "No IRQ resource\n"); 764 dev_err(&pdev->dev, "No IRQ resource\n");
765 err = irq;
763 goto fail; 766 goto fail;
764 } 767 }
765 768
@@ -782,7 +785,8 @@ static int rcar_can_probe(struct platform_device *pdev)
782 priv->clk = devm_clk_get(&pdev->dev, "clkp1"); 785 priv->clk = devm_clk_get(&pdev->dev, "clkp1");
783 if (IS_ERR(priv->clk)) { 786 if (IS_ERR(priv->clk)) {
784 err = PTR_ERR(priv->clk); 787 err = PTR_ERR(priv->clk);
785 dev_err(&pdev->dev, "cannot get peripheral clock: %d\n", err); 788 dev_err(&pdev->dev, "cannot get peripheral clock, error %d\n",
789 err);
786 goto fail_clk; 790 goto fail_clk;
787 } 791 }
788 792
@@ -794,7 +798,7 @@ static int rcar_can_probe(struct platform_device *pdev)
794 priv->can_clk = devm_clk_get(&pdev->dev, clock_names[clock_select]); 798 priv->can_clk = devm_clk_get(&pdev->dev, clock_names[clock_select]);
795 if (IS_ERR(priv->can_clk)) { 799 if (IS_ERR(priv->can_clk)) {
796 err = PTR_ERR(priv->can_clk); 800 err = PTR_ERR(priv->can_clk);
797 dev_err(&pdev->dev, "cannot get CAN clock: %d\n", err); 801 dev_err(&pdev->dev, "cannot get CAN clock, error %d\n", err);
798 goto fail_clk; 802 goto fail_clk;
799 } 803 }
800 804
@@ -823,7 +827,7 @@ static int rcar_can_probe(struct platform_device *pdev)
823 827
824 devm_can_led_init(ndev); 828 devm_can_led_init(ndev);
825 829
826 dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%u)\n", 830 dev_info(&pdev->dev, "device registered (regs @ %p, IRQ%d)\n",
827 priv->regs, ndev->irq); 831 priv->regs, ndev->irq);
828 832
829 return 0; 833 return 0;
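
The IRQ change above follows the platform_get_irq() contract: failure is reported as a negative errno (zero is not the failure case), and that value should be propagated so deferred probing keeps working. A hedged fragment of the pattern:

    irq = platform_get_irq(pdev, 0);
    if (irq < 0) {
            dev_err(&pdev->dev, "No IRQ resource\n");
            return irq;     /* propagate -ENXIO / -EPROBE_DEFER instead of guessing */
    }
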
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 32bd7f451aa4..7b92e911a616 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -377,10 +377,9 @@ static void sja1000_rx(struct net_device *dev)
377 /* release receive buffer */ 377 /* release receive buffer */
378 sja1000_write_cmdreg(priv, CMD_RRB); 378 sja1000_write_cmdreg(priv, CMD_RRB);
379 379
380 netif_rx(skb);
381
382 stats->rx_packets++; 380 stats->rx_packets++;
383 stats->rx_bytes += cf->can_dlc; 381 stats->rx_bytes += cf->can_dlc;
382 netif_rx(skb);
384 383
385 can_led_event(dev, CAN_LED_EVENT_RX); 384 can_led_event(dev, CAN_LED_EVENT_RX);
386} 385}
@@ -484,10 +483,9 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
484 can_bus_off(dev); 483 can_bus_off(dev);
485 } 484 }
486 485
487 netif_rx(skb);
488
489 stats->rx_packets++; 486 stats->rx_packets++;
490 stats->rx_bytes += cf->can_dlc; 487 stats->rx_bytes += cf->can_dlc;
488 netif_rx(skb);
491 489
492 return 0; 490 return 0;
493} 491}
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index f64f5290d6f8..9a3f15cb7ef4 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -207,7 +207,6 @@ static void slc_bump(struct slcan *sl)
207 if (!skb) 207 if (!skb)
208 return; 208 return;
209 209
210 __net_timestamp(skb);
211 skb->dev = sl->dev; 210 skb->dev = sl->dev;
212 skb->protocol = htons(ETH_P_CAN); 211 skb->protocol = htons(ETH_P_CAN);
213 skb->pkt_type = PACKET_BROADCAST; 212 skb->pkt_type = PACKET_BROADCAST;
@@ -215,13 +214,14 @@ static void slc_bump(struct slcan *sl)
215 214
216 can_skb_reserve(skb); 215 can_skb_reserve(skb);
217 can_skb_prv(skb)->ifindex = sl->dev->ifindex; 216 can_skb_prv(skb)->ifindex = sl->dev->ifindex;
217 can_skb_prv(skb)->skbcnt = 0;
218 218
219 memcpy(skb_put(skb, sizeof(struct can_frame)), 219 memcpy(skb_put(skb, sizeof(struct can_frame)),
220 &cf, sizeof(struct can_frame)); 220 &cf, sizeof(struct can_frame));
221 netif_rx_ni(skb);
222 221
223 sl->dev->stats.rx_packets++; 222 sl->dev->stats.rx_packets++;
224 sl->dev->stats.rx_bytes += cf.can_dlc; 223 sl->dev->stats.rx_bytes += cf.can_dlc;
224 netif_rx_ni(skb);
225} 225}
226 226
227/* parse tty input stream */ 227/* parse tty input stream */
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index c1a95a34d62e..b7e83c212023 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -1086,8 +1086,8 @@ static int mcp251x_can_probe(struct spi_device *spi)
1086 if (ret) 1086 if (ret)
1087 goto out_clk; 1087 goto out_clk;
1088 1088
1089 priv->power = devm_regulator_get(&spi->dev, "vdd"); 1089 priv->power = devm_regulator_get_optional(&spi->dev, "vdd");
1090 priv->transceiver = devm_regulator_get(&spi->dev, "xceiver"); 1090 priv->transceiver = devm_regulator_get_optional(&spi->dev, "xceiver");
1091 if ((PTR_ERR(priv->power) == -EPROBE_DEFER) || 1091 if ((PTR_ERR(priv->power) == -EPROBE_DEFER) ||
1092 (PTR_ERR(priv->transceiver) == -EPROBE_DEFER)) { 1092 (PTR_ERR(priv->transceiver) == -EPROBE_DEFER)) {
1093 ret = -EPROBE_DEFER; 1093 ret = -EPROBE_DEFER;
@@ -1222,17 +1222,16 @@ static int __maybe_unused mcp251x_can_resume(struct device *dev)
1222 struct spi_device *spi = to_spi_device(dev); 1222 struct spi_device *spi = to_spi_device(dev);
1223 struct mcp251x_priv *priv = spi_get_drvdata(spi); 1223 struct mcp251x_priv *priv = spi_get_drvdata(spi);
1224 1224
1225 if (priv->after_suspend & AFTER_SUSPEND_POWER) { 1225 if (priv->after_suspend & AFTER_SUSPEND_POWER)
1226 mcp251x_power_enable(priv->power, 1); 1226 mcp251x_power_enable(priv->power, 1);
1227
1228 if (priv->after_suspend & AFTER_SUSPEND_UP) {
1229 mcp251x_power_enable(priv->transceiver, 1);
1227 queue_work(priv->wq, &priv->restart_work); 1230 queue_work(priv->wq, &priv->restart_work);
1228 } else { 1231 } else {
1229 if (priv->after_suspend & AFTER_SUSPEND_UP) { 1232 priv->after_suspend = 0;
1230 mcp251x_power_enable(priv->transceiver, 1);
1231 queue_work(priv->wq, &priv->restart_work);
1232 } else {
1233 priv->after_suspend = 0;
1234 }
1235 } 1233 }
1234
1236 priv->force_quit = 0; 1235 priv->force_quit = 0;
1237 enable_irq(spi->irq); 1236 enable_irq(spi->irq);
1238 return 0; 1237 return 0;
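
The probe change above moves to devm_regulator_get_optional(): an absent supply comes back as an error pointer instead of a dummy regulator, while -EPROBE_DEFER must still be propagated. A sketch of that handling, assuming the driver's enable helper treats an error-pointer supply as "not wired up" and skips it:

    priv->power       = devm_regulator_get_optional(&spi->dev, "vdd");
    priv->transceiver = devm_regulator_get_optional(&spi->dev, "xceiver");
    if (PTR_ERR(priv->power) == -EPROBE_DEFER ||
        PTR_ERR(priv->transceiver) == -EPROBE_DEFER)
            return -EPROBE_DEFER;   /* supply described in DT but not ready yet */
    /* any other error pointer means "no such supply" and is tolerated */
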
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index e95a9e1a889f..cf345cbfe819 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -747,9 +747,9 @@ static int ti_hecc_error(struct net_device *ndev, int int_status,
747 } 747 }
748 } 748 }
749 749
750 netif_rx(skb);
751 stats->rx_packets++; 750 stats->rx_packets++;
752 stats->rx_bytes += cf->can_dlc; 751 stats->rx_bytes += cf->can_dlc;
752 netif_rx(skb);
753 753
754 return 0; 754 return 0;
755} 755}
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 866bac0ae7e9..2d390384ef3b 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -324,10 +324,9 @@ static void ems_usb_rx_can_msg(struct ems_usb *dev, struct ems_cpc_msg *msg)
324 cf->data[i] = msg->msg.can_msg.msg[i]; 324 cf->data[i] = msg->msg.can_msg.msg[i];
325 } 325 }
326 326
327 netif_rx(skb);
328
329 stats->rx_packets++; 327 stats->rx_packets++;
330 stats->rx_bytes += cf->can_dlc; 328 stats->rx_bytes += cf->can_dlc;
329 netif_rx(skb);
331} 330}
332 331
333static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg) 332static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)
@@ -400,10 +399,9 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)
400 stats->rx_errors++; 399 stats->rx_errors++;
401 } 400 }
402 401
403 netif_rx(skb);
404
405 stats->rx_packets++; 402 stats->rx_packets++;
406 stats->rx_bytes += cf->can_dlc; 403 stats->rx_bytes += cf->can_dlc;
404 netif_rx(skb);
407} 405}
408 406
409/* 407/*
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index 411c1af92c62..0e5a4493ba4f 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -301,13 +301,12 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv,
301 cf->data[7] = rxerr; 301 cf->data[7] = rxerr;
302 } 302 }
303 303
304 netif_rx(skb);
305
306 priv->bec.txerr = txerr; 304 priv->bec.txerr = txerr;
307 priv->bec.rxerr = rxerr; 305 priv->bec.rxerr = rxerr;
308 306
309 stats->rx_packets++; 307 stats->rx_packets++;
310 stats->rx_bytes += cf->can_dlc; 308 stats->rx_bytes += cf->can_dlc;
309 netif_rx(skb);
311 } 310 }
312} 311}
313 312
@@ -347,10 +346,9 @@ static void esd_usb2_rx_can_msg(struct esd_usb2_net_priv *priv,
347 cf->data[i] = msg->msg.rx.data[i]; 346 cf->data[i] = msg->msg.rx.data[i];
348 } 347 }
349 348
350 netif_rx(skb);
351
352 stats->rx_packets++; 349 stats->rx_packets++;
353 stats->rx_bytes += cf->can_dlc; 350 stats->rx_bytes += cf->can_dlc;
351 netif_rx(skb);
354 } 352 }
355 353
356 return; 354 return;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index 72427f21edff..6b94007ae052 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -526,9 +526,9 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
526 hwts->hwtstamp = timeval_to_ktime(tv); 526 hwts->hwtstamp = timeval_to_ktime(tv);
527 } 527 }
528 528
529 netif_rx(skb);
530 mc->netdev->stats.rx_packets++; 529 mc->netdev->stats.rx_packets++;
531 mc->netdev->stats.rx_bytes += cf->can_dlc; 530 mc->netdev->stats.rx_bytes += cf->can_dlc;
531 netif_rx(skb);
532 532
533 return 0; 533 return 0;
534} 534}
@@ -659,12 +659,11 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
659 hwts = skb_hwtstamps(skb); 659 hwts = skb_hwtstamps(skb);
660 hwts->hwtstamp = timeval_to_ktime(tv); 660 hwts->hwtstamp = timeval_to_ktime(tv);
661 661
662 /* push the skb */
663 netif_rx(skb);
664
665 /* update statistics */ 662 /* update statistics */
666 mc->netdev->stats.rx_packets++; 663 mc->netdev->stats.rx_packets++;
667 mc->netdev->stats.rx_bytes += cf->can_dlc; 664 mc->netdev->stats.rx_bytes += cf->can_dlc;
665 /* push the skb */
666 netif_rx(skb);
668 667
669 return 0; 668 return 0;
670 669
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index dec51717635e..7d61b3279798 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -553,9 +553,9 @@ static int pcan_usb_pro_handle_canmsg(struct pcan_usb_pro_interface *usb_if,
553 hwts = skb_hwtstamps(skb); 553 hwts = skb_hwtstamps(skb);
554 hwts->hwtstamp = timeval_to_ktime(tv); 554 hwts->hwtstamp = timeval_to_ktime(tv);
555 555
556 netif_rx(skb);
557 netdev->stats.rx_packets++; 556 netdev->stats.rx_packets++;
558 netdev->stats.rx_bytes += can_frame->can_dlc; 557 netdev->stats.rx_bytes += can_frame->can_dlc;
558 netif_rx(skb);
559 559
560 return 0; 560 return 0;
561} 561}
@@ -670,9 +670,9 @@ static int pcan_usb_pro_handle_error(struct pcan_usb_pro_interface *usb_if,
670 peak_usb_get_ts_tv(&usb_if->time_ref, le32_to_cpu(er->ts32), &tv); 670 peak_usb_get_ts_tv(&usb_if->time_ref, le32_to_cpu(er->ts32), &tv);
671 hwts = skb_hwtstamps(skb); 671 hwts = skb_hwtstamps(skb);
672 hwts->hwtstamp = timeval_to_ktime(tv); 672 hwts->hwtstamp = timeval_to_ktime(tv);
673 netif_rx(skb);
674 netdev->stats.rx_packets++; 673 netdev->stats.rx_packets++;
675 netdev->stats.rx_bytes += can_frame->can_dlc; 674 netdev->stats.rx_bytes += can_frame->can_dlc;
675 netif_rx(skb);
676 676
677 return 0; 677 return 0;
678} 678}
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index dd52c7a4c80d..de95b1ccba3e 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -461,10 +461,9 @@ static void usb_8dev_rx_err_msg(struct usb_8dev_priv *priv,
461 priv->bec.txerr = txerr; 461 priv->bec.txerr = txerr;
462 priv->bec.rxerr = rxerr; 462 priv->bec.rxerr = rxerr;
463 463
464 netif_rx(skb);
465
466 stats->rx_packets++; 464 stats->rx_packets++;
467 stats->rx_bytes += cf->can_dlc; 465 stats->rx_bytes += cf->can_dlc;
466 netif_rx(skb);
468} 467}
469 468
470/* Read data and status frames */ 469/* Read data and status frames */
@@ -494,10 +493,9 @@ static void usb_8dev_rx_can_msg(struct usb_8dev_priv *priv,
494 else 493 else
495 memcpy(cf->data, msg->data, cf->can_dlc); 494 memcpy(cf->data, msg->data, cf->can_dlc);
496 495
497 netif_rx(skb);
498
499 stats->rx_packets++; 496 stats->rx_packets++;
500 stats->rx_bytes += cf->can_dlc; 497 stats->rx_bytes += cf->can_dlc;
498 netif_rx(skb);
501 499
502 can_led_event(priv->netdev, CAN_LED_EVENT_RX); 500 can_led_event(priv->netdev, CAN_LED_EVENT_RX);
503 } else { 501 } else {
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index 0ce868de855d..674f367087c5 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -78,9 +78,6 @@ static void vcan_rx(struct sk_buff *skb, struct net_device *dev)
78 skb->dev = dev; 78 skb->dev = dev;
79 skb->ip_summed = CHECKSUM_UNNECESSARY; 79 skb->ip_summed = CHECKSUM_UNNECESSARY;
80 80
81 if (!(skb->tstamp.tv64))
82 __net_timestamp(skb);
83
84 netif_rx_ni(skb); 81 netif_rx_ni(skb);
85} 82}
86 83
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 972982f8bea7..079897b3a955 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -696,9 +696,20 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds)
696 } 696 }
697 697
698 /* Include the pseudo-PHY address and the broadcast PHY address to 698 /* Include the pseudo-PHY address and the broadcast PHY address to
699 * divert reads towards our workaround 699 * divert reads towards our workaround. This is only required for
700 * 7445D0, since 7445E0 disconnects the internal switch pseudo-PHY such
701 * that we can use the regular SWITCH_MDIO master controller instead.
702 *
703 * By default, DSA initializes ds->phys_mii_mask to ds->phys_port_mask
704 * to have a 1:1 mapping between Port address and PHY address in order
705 * to utilize the slave_mii_bus instance to read from Port PHYs. This is
706 * not what we want here, so we initialize phys_mii_mask to 0 to always
707 * utilize the "master" MDIO bus backed by the "mdio-unimac" driver.
700 */ 708 */
701 ds->phys_mii_mask |= ((1 << BRCM_PSEUDO_PHY_ADDR) | (1 << 0)); 709 if (of_machine_is_compatible("brcm,bcm7445d0"))
710 ds->phys_mii_mask |= ((1 << BRCM_PSEUDO_PHY_ADDR) | (1 << 0));
711 else
712 ds->phys_mii_mask = 0;
702 713
703 rev = reg_readl(priv, REG_SWITCH_REVISION); 714 rev = reg_readl(priv, REG_SWITCH_REVISION);
704 priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) & 715 priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) &
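
A condensed sketch of the revision gating described in the new comment: only 7445D0 needs the pseudo-PHY read diversion, later parts use the regular SWITCH_MDIO master, so phys_mii_mask stays 0 there. Illustrative only; it mirrors the hunk above.

    if (of_machine_is_compatible("brcm,bcm7445d0"))
            ds->phys_mii_mask |= BIT(BRCM_PSEUDO_PHY_ADDR) | BIT(0);
    else
            ds->phys_mii_mask = 0;  /* always go through the "mdio-unimac" master */
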
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index fd8547c2b79d..561342466076 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -1163,7 +1163,7 @@ int mv88e6xxx_leave_bridge(struct dsa_switch *ds, int port, u32 br_port_mask)
1163 1163
1164 newfid = __ffs(ps->fid_mask); 1164 newfid = __ffs(ps->fid_mask);
1165 ps->fid[port] = newfid; 1165 ps->fid[port] = newfid;
1166 ps->fid_mask &= (1 << newfid); 1166 ps->fid_mask &= ~(1 << newfid);
1167 ps->bridge_mask[fid] &= ~(1 << port); 1167 ps->bridge_mask[fid] &= ~(1 << port);
1168 ps->bridge_mask[newfid] = 1 << port; 1168 ps->bridge_mask[newfid] = 1 << port;
1169 1169
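
The mv88e6xxx change is a one-operator fix: claiming a FID from the free bitmask must clear only that bit, which requires the complement. A tiny standalone model (using __builtin_ctz() in place of the kernel's __ffs()):

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned int fid_mask = 0xff;                    /* set bits = FIDs still free */
            unsigned int newfid   = __builtin_ctz(fid_mask); /* lowest free FID, like __ffs() */

            fid_mask &= ~(1u << newfid);                     /* claim it: clear just that bit */
            assert(fid_mask == 0xfe);

            /* The old code's `fid_mask &= (1 << newfid)` would instead keep only
             * the claimed bit and mark every other FID as used.
             */
            printf("fid %u claimed, mask now 0x%02x\n", newfid, fid_mask);
            return 0;
    }
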
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 41095ebad97f..753887d02b46 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -1763,16 +1763,9 @@ vortex_open(struct net_device *dev)
1763 vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE)); 1763 vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
1764 } 1764 }
1765 if (i != RX_RING_SIZE) { 1765 if (i != RX_RING_SIZE) {
1766 int j;
1767 pr_emerg("%s: no memory for rx ring\n", dev->name); 1766 pr_emerg("%s: no memory for rx ring\n", dev->name);
1768 for (j = 0; j < i; j++) {
1769 if (vp->rx_skbuff[j]) {
1770 dev_kfree_skb(vp->rx_skbuff[j]);
1771 vp->rx_skbuff[j] = NULL;
1772 }
1773 }
1774 retval = -ENOMEM; 1767 retval = -ENOMEM;
1775 goto err_free_irq; 1768 goto err_free_skb;
1776 } 1769 }
1777 /* Wrap the ring. */ 1770 /* Wrap the ring. */
1778 vp->rx_ring[i-1].next = cpu_to_le32(vp->rx_ring_dma); 1771 vp->rx_ring[i-1].next = cpu_to_le32(vp->rx_ring_dma);
@@ -1782,7 +1775,13 @@ vortex_open(struct net_device *dev)
1782 if (!retval) 1775 if (!retval)
1783 goto out; 1776 goto out;
1784 1777
1785err_free_irq: 1778err_free_skb:
1779 for (i = 0; i < RX_RING_SIZE; i++) {
1780 if (vp->rx_skbuff[i]) {
1781 dev_kfree_skb(vp->rx_skbuff[i]);
1782 vp->rx_skbuff[i] = NULL;
1783 }
1784 }
1786 free_irq(dev->irq, dev); 1785 free_irq(dev->irq, dev);
1787err: 1786err:
1788 if (vortex_debug > 1) 1787 if (vortex_debug > 1)
@@ -2382,6 +2381,7 @@ boomerang_interrupt(int irq, void *dev_id)
2382 void __iomem *ioaddr; 2381 void __iomem *ioaddr;
2383 int status; 2382 int status;
2384 int work_done = max_interrupt_work; 2383 int work_done = max_interrupt_work;
2384 int handled = 0;
2385 2385
2386 ioaddr = vp->ioaddr; 2386 ioaddr = vp->ioaddr;
2387 2387
@@ -2400,6 +2400,7 @@ boomerang_interrupt(int irq, void *dev_id)
2400 2400
2401 if ((status & IntLatch) == 0) 2401 if ((status & IntLatch) == 0)
2402 goto handler_exit; /* No interrupt: shared IRQs can cause this */ 2402 goto handler_exit; /* No interrupt: shared IRQs can cause this */
2403 handled = 1;
2403 2404
2404 if (status == 0xffff) { /* h/w no longer present (hotplug)? */ 2405 if (status == 0xffff) { /* h/w no longer present (hotplug)? */
2405 if (vortex_debug > 1) 2406 if (vortex_debug > 1)
@@ -2501,7 +2502,7 @@ boomerang_interrupt(int irq, void *dev_id)
2501handler_exit: 2502handler_exit:
2502 vp->handling_irq = 0; 2503 vp->handling_irq = 0;
2503 spin_unlock(&vp->lock); 2504 spin_unlock(&vp->lock);
2504 return IRQ_HANDLED; 2505 return IRQ_RETVAL(handled);
2505} 2506}
2506 2507
2507static int vortex_rx(struct net_device *dev) 2508static int vortex_rx(struct net_device *dev)
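
The boomerang_interrupt() change adopts the usual shared-IRQ convention: return IRQ_NONE when the latch bit shows the device did not raise the interrupt, so the kernel's spurious-interrupt accounting stays meaningful. A kernel-style skeleton with placeholder names (ioaddr, INT_STATUS, INT_LATCH):

    static irqreturn_t example_interrupt(int irq, void *dev_id)
    {
            int handled = 0;
            u32 status = ioread32(ioaddr + INT_STATUS);   /* ioaddr/INT_* are placeholders */

            if (!(status & INT_LATCH))
                    goto out;       /* not ours: a shared-IRQ neighbour fired */
            handled = 1;

            /* ... service the device ... */
    out:
            return IRQ_RETVAL(handled);   /* IRQ_HANDLED if handled, IRQ_NONE otherwise */
    }
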
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
index 661cdaa7ea96..b3bc87fe3764 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
@@ -303,7 +303,8 @@ static void xgbe_set_buffer_data(struct xgbe_buffer_data *bd,
303 get_page(pa->pages); 303 get_page(pa->pages);
304 bd->pa = *pa; 304 bd->pa = *pa;
305 305
306 bd->dma = pa->pages_dma + pa->pages_offset; 306 bd->dma_base = pa->pages_dma;
307 bd->dma_off = pa->pages_offset;
307 bd->dma_len = len; 308 bd->dma_len = len;
308 309
309 pa->pages_offset += len; 310 pa->pages_offset += len;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 506e832c9e9a..a4473d8ff4fa 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -1110,6 +1110,7 @@ static void xgbe_rx_desc_reset(struct xgbe_prv_data *pdata,
1110 unsigned int rx_usecs = pdata->rx_usecs; 1110 unsigned int rx_usecs = pdata->rx_usecs;
1111 unsigned int rx_frames = pdata->rx_frames; 1111 unsigned int rx_frames = pdata->rx_frames;
1112 unsigned int inte; 1112 unsigned int inte;
1113 dma_addr_t hdr_dma, buf_dma;
1113 1114
1114 if (!rx_usecs && !rx_frames) { 1115 if (!rx_usecs && !rx_frames) {
1115 /* No coalescing, interrupt for every descriptor */ 1116 /* No coalescing, interrupt for every descriptor */
@@ -1129,10 +1130,12 @@ static void xgbe_rx_desc_reset(struct xgbe_prv_data *pdata,
1129 * Set buffer 2 (hi) address to buffer dma address (hi) and 1130 * Set buffer 2 (hi) address to buffer dma address (hi) and
1130 * set control bits OWN and INTE 1131 * set control bits OWN and INTE
1131 */ 1132 */
1132 rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->rx.hdr.dma)); 1133 hdr_dma = rdata->rx.hdr.dma_base + rdata->rx.hdr.dma_off;
1133 rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->rx.hdr.dma)); 1134 buf_dma = rdata->rx.buf.dma_base + rdata->rx.buf.dma_off;
1134 rdesc->desc2 = cpu_to_le32(lower_32_bits(rdata->rx.buf.dma)); 1135 rdesc->desc0 = cpu_to_le32(lower_32_bits(hdr_dma));
1135 rdesc->desc3 = cpu_to_le32(upper_32_bits(rdata->rx.buf.dma)); 1136 rdesc->desc1 = cpu_to_le32(upper_32_bits(hdr_dma));
1137 rdesc->desc2 = cpu_to_le32(lower_32_bits(buf_dma));
1138 rdesc->desc3 = cpu_to_le32(upper_32_bits(buf_dma));
1136 1139
1137 XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, inte); 1140 XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, inte);
1138 1141
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 1e9c28d19ef8..aae9d5ecd182 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1765,8 +1765,9 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
1765 /* Start with the header buffer which may contain just the header 1765 /* Start with the header buffer which may contain just the header
1766 * or the header plus data 1766 * or the header plus data
1767 */ 1767 */
1768 dma_sync_single_for_cpu(pdata->dev, rdata->rx.hdr.dma, 1768 dma_sync_single_range_for_cpu(pdata->dev, rdata->rx.hdr.dma_base,
1769 rdata->rx.hdr.dma_len, DMA_FROM_DEVICE); 1769 rdata->rx.hdr.dma_off,
1770 rdata->rx.hdr.dma_len, DMA_FROM_DEVICE);
1770 1771
1771 packet = page_address(rdata->rx.hdr.pa.pages) + 1772 packet = page_address(rdata->rx.hdr.pa.pages) +
1772 rdata->rx.hdr.pa.pages_offset; 1773 rdata->rx.hdr.pa.pages_offset;
@@ -1778,8 +1779,11 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
1778 len -= copy_len; 1779 len -= copy_len;
1779 if (len) { 1780 if (len) {
1780 /* Add the remaining data as a frag */ 1781 /* Add the remaining data as a frag */
1781 dma_sync_single_for_cpu(pdata->dev, rdata->rx.buf.dma, 1782 dma_sync_single_range_for_cpu(pdata->dev,
1782 rdata->rx.buf.dma_len, DMA_FROM_DEVICE); 1783 rdata->rx.buf.dma_base,
1784 rdata->rx.buf.dma_off,
1785 rdata->rx.buf.dma_len,
1786 DMA_FROM_DEVICE);
1783 1787
1784 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, 1788 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
1785 rdata->rx.buf.pa.pages, 1789 rdata->rx.buf.pa.pages,
@@ -1945,8 +1949,9 @@ read_again:
1945 if (!skb) 1949 if (!skb)
1946 error = 1; 1950 error = 1;
1947 } else if (rdesc_len) { 1951 } else if (rdesc_len) {
1948 dma_sync_single_for_cpu(pdata->dev, 1952 dma_sync_single_range_for_cpu(pdata->dev,
1949 rdata->rx.buf.dma, 1953 rdata->rx.buf.dma_base,
1954 rdata->rx.buf.dma_off,
1950 rdata->rx.buf.dma_len, 1955 rdata->rx.buf.dma_len,
1951 DMA_FROM_DEVICE); 1956 DMA_FROM_DEVICE);
1952 1957
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index 63d72a140053..717ce21b6077 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -337,7 +337,8 @@ struct xgbe_buffer_data {
337 struct xgbe_page_alloc pa; 337 struct xgbe_page_alloc pa;
338 struct xgbe_page_alloc pa_unmap; 338 struct xgbe_page_alloc pa_unmap;
339 339
340 dma_addr_t dma; 340 dma_addr_t dma_base;
341 unsigned long dma_off;
341 unsigned int dma_len; 342 unsigned int dma_len;
342}; 343};
343 344
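
The xgbe changes split the stored buffer address into the mapped page base plus an offset, so partial-page RX buffers can be synced with dma_sync_single_range_for_cpu(), whose offset argument is relative to the original mapping. A hedged fragment of the call shape, reusing the new dma_base/dma_off/dma_len fields:

    /* Sync only this buffer's slice of the shared mapped page: base must be
     * the address returned by the original dma_map_*() call, offset is where
     * this buffer starts inside that mapping.
     */
    dma_sync_single_range_for_cpu(pdata->dev, bd->dma_base, bd->dma_off,
                                  bd->dma_len, DMA_FROM_DEVICE);
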
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 909ad7a0d480..4566cdf0bc39 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1793,7 +1793,7 @@ static int bcm_sysport_probe(struct platform_device *pdev)
1793 macaddr = of_get_mac_address(dn); 1793 macaddr = of_get_mac_address(dn);
1794 if (!macaddr || !is_valid_ether_addr(macaddr)) { 1794 if (!macaddr || !is_valid_ether_addr(macaddr)) {
1795 dev_warn(&pdev->dev, "using random Ethernet MAC\n"); 1795 dev_warn(&pdev->dev, "using random Ethernet MAC\n");
1796 random_ether_addr(dev->dev_addr); 1796 eth_hw_addr_random(dev);
1797 } else { 1797 } else {
1798 ether_addr_copy(dev->dev_addr, macaddr); 1798 ether_addr_copy(dev->dev_addr, macaddr);
1799 } 1799 }
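
A brief note on the helper swap above: eth_hw_addr_random() fills dev->dev_addr with a random locally administered unicast address and also records the random assignment type on the netdev, which the plain buffer-filling random_ether_addr() call did not do. Sketch, mirroring the hunk:

    if (!macaddr || !is_valid_ether_addr(macaddr)) {
            dev_warn(&pdev->dev, "using random Ethernet MAC\n");
            eth_hw_addr_random(dev);        /* sets dev_addr and marks it randomly assigned */
    } else {
            ether_addr_copy(dev->dev_addr, macaddr);
    }
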
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index a90d7364334f..f7fbdc9d1325 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -262,9 +262,9 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
262 if (likely(skb)) { 262 if (likely(skb)) {
263 (*pkts_compl)++; 263 (*pkts_compl)++;
264 (*bytes_compl) += skb->len; 264 (*bytes_compl) += skb->len;
265 dev_kfree_skb_any(skb);
265 } 266 }
266 267
267 dev_kfree_skb_any(skb);
268 tx_buf->first_bd = 0; 268 tx_buf->first_bd = 0;
269 tx_buf->skb = NULL; 269 tx_buf->skb = NULL;
270 270
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 76b9052a961c..5907c821d131 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1718,6 +1718,22 @@ static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
1718 offset += sizeof(u32); 1718 offset += sizeof(u32);
1719 data_buf += sizeof(u32); 1719 data_buf += sizeof(u32);
1720 written_so_far += sizeof(u32); 1720 written_so_far += sizeof(u32);
1721
1722	/* At the end of each 4Kb page, release the nvram lock to allow the MFW
1723	 * a chance to take it for its own use.
1724 */
1725 if ((cmd_flags & MCPR_NVM_COMMAND_LAST) &&
1726 (written_so_far < buf_size)) {
1727 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
1728 "Releasing NVM lock after offset 0x%x\n",
1729 (u32)(offset - sizeof(u32)));
1730 bnx2x_release_nvram_lock(bp);
1731 usleep_range(1000, 2000);
1732 rc = bnx2x_acquire_nvram_lock(bp);
1733 if (rc)
1734 return rc;
1735 }
1736
1721 cmd_flags = 0; 1737 cmd_flags = 0;
1722 } 1738 }
1723 1739
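
The NVRAM write hunk yields the lock at 4 KB page boundaries so the management firmware can grab it mid-flash. A condensed sketch of that loop step, reusing the helper names already shown (loop plumbing elided):

    /* MCPR_NVM_COMMAND_LAST marks the final dword of a 4 KB page; if more
     * data remains, briefly drop the NVRAM lock, let the MFW in, re-acquire.
     */
    if ((cmd_flags & MCPR_NVM_COMMAND_LAST) && written_so_far < buf_size) {
            bnx2x_release_nvram_lock(bp);
            usleep_range(1000, 2000);
            rc = bnx2x_acquire_nvram_lock(bp);
            if (rc)
                    return rc;
    }
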
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index b43b2cb9b830..64c1e9db6b0b 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1230,7 +1230,6 @@ static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev,
1230 new_skb = skb_realloc_headroom(skb, sizeof(*status)); 1230 new_skb = skb_realloc_headroom(skb, sizeof(*status));
1231 dev_kfree_skb(skb); 1231 dev_kfree_skb(skb);
1232 if (!new_skb) { 1232 if (!new_skb) {
1233 dev->stats.tx_errors++;
1234 dev->stats.tx_dropped++; 1233 dev->stats.tx_dropped++;
1235 return NULL; 1234 return NULL;
1236 } 1235 }
@@ -1465,7 +1464,6 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
1465 1464
1466 if (unlikely(!skb)) { 1465 if (unlikely(!skb)) {
1467 dev->stats.rx_dropped++; 1466 dev->stats.rx_dropped++;
1468 dev->stats.rx_errors++;
1469 goto next; 1467 goto next;
1470 } 1468 }
1471 1469
@@ -1493,7 +1491,6 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
1493 if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) { 1491 if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
1494 netif_err(priv, rx_status, dev, 1492 netif_err(priv, rx_status, dev,
1495 "dropping fragmented packet!\n"); 1493 "dropping fragmented packet!\n");
1496 dev->stats.rx_dropped++;
1497 dev->stats.rx_errors++; 1494 dev->stats.rx_errors++;
1498 dev_kfree_skb_any(skb); 1495 dev_kfree_skb_any(skb);
1499 goto next; 1496 goto next;
@@ -1515,7 +1512,6 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
1515 dev->stats.rx_frame_errors++; 1512 dev->stats.rx_frame_errors++;
1516 if (dma_flag & DMA_RX_LG) 1513 if (dma_flag & DMA_RX_LG)
1517 dev->stats.rx_length_errors++; 1514 dev->stats.rx_length_errors++;
1518 dev->stats.rx_dropped++;
1519 dev->stats.rx_errors++; 1515 dev->stats.rx_errors++;
1520 dev_kfree_skb_any(skb); 1516 dev_kfree_skb_any(skb);
1521 goto next; 1517 goto next;
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index ac27e24264a5..f557a2aaec23 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -1508,16 +1508,7 @@ static void sbmac_channel_start(struct sbmac_softc *s)
1508 __raw_writeq(reg, port); 1508 __raw_writeq(reg, port);
1509 port = s->sbm_base + R_MAC_ETHERNET_ADDR; 1509 port = s->sbm_base + R_MAC_ETHERNET_ADDR;
1510 1510
1511#ifdef CONFIG_SB1_PASS_1_WORKAROUNDS
1512 /*
1513 * Pass1 SOCs do not receive packets addressed to the
1514 * destination address in the R_MAC_ETHERNET_ADDR register.
1515 * Set the value to zero.
1516 */
1517 __raw_writeq(0, port);
1518#else
1519 __raw_writeq(reg, port); 1511 __raw_writeq(reg, port);
1520#endif
1521 1512
1522 /* 1513 /*
1523 * Set the receive filter for no packets, and write values 1514 * Set the receive filter for no packets, and write values
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 0612b19f6313..506047c38607 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -676,6 +676,7 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
676 if (!next_cmpl->valid) 676 if (!next_cmpl->valid)
677 break; 677 break;
678 } 678 }
679 packets++;
679 680
680 /* TODO: BNA_CQ_EF_LOCAL ? */ 681 /* TODO: BNA_CQ_EF_LOCAL ? */
681 if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR | 682 if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
@@ -692,7 +693,6 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
692 else 693 else
693 bnad_cq_setup_skb_frags(rcb, skb, sop_ci, nvecs, len); 694 bnad_cq_setup_skb_frags(rcb, skb, sop_ci, nvecs, len);
694 695
695 packets++;
696 rcb->rxq->rx_packets++; 696 rcb->rxq->rx_packets++;
697 rcb->rxq->rx_bytes += totlen; 697 rcb->rxq->rx_bytes += totlen;
698 ccb->bytes_per_intr += totlen; 698 ccb->bytes_per_intr += totlen;
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index caeb39561567..bf9eb2ecf960 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -104,6 +104,57 @@ static void *macb_rx_buffer(struct macb *bp, unsigned int index)
104 return bp->rx_buffers + bp->rx_buffer_size * macb_rx_ring_wrap(index); 104 return bp->rx_buffers + bp->rx_buffer_size * macb_rx_ring_wrap(index);
105} 105}
106 106
107/* I/O accessors */
108static u32 hw_readl_native(struct macb *bp, int offset)
109{
110 return __raw_readl(bp->regs + offset);
111}
112
113static void hw_writel_native(struct macb *bp, int offset, u32 value)
114{
115 __raw_writel(value, bp->regs + offset);
116}
117
118static u32 hw_readl(struct macb *bp, int offset)
119{
120 return readl_relaxed(bp->regs + offset);
121}
122
123static void hw_writel(struct macb *bp, int offset, u32 value)
124{
125 writel_relaxed(value, bp->regs + offset);
126}
127
128/*
129 * Find the CPU endianness by using the loopback bit of NCR register. When the
130 * CPU is in big endian we need to program swapped mode for management
131 * descriptor access.
132 */
133static bool hw_is_native_io(void __iomem *addr)
134{
135 u32 value = MACB_BIT(LLB);
136
137 __raw_writel(value, addr + MACB_NCR);
138 value = __raw_readl(addr + MACB_NCR);
139
140 /* Write 0 back to disable everything */
141 __raw_writel(0, addr + MACB_NCR);
142
143 return value == MACB_BIT(LLB);
144}
145
146static bool hw_is_gem(void __iomem *addr, bool native_io)
147{
148 u32 id;
149
150 if (native_io)
151 id = __raw_readl(addr + MACB_MID);
152 else
153 id = readl_relaxed(addr + MACB_MID);
154
155 return MACB_BFEXT(IDNUM, id) >= 0x2;
156}
157
107static void macb_set_hwaddr(struct macb *bp) 158static void macb_set_hwaddr(struct macb *bp)
108{ 159{
109 u32 bottom; 160 u32 bottom;
@@ -160,7 +211,7 @@ static void macb_get_hwaddr(struct macb *bp)
160 } 211 }
161 } 212 }
162 213
163 netdev_info(bp->dev, "invalid hw address, using random\n"); 214 dev_info(&bp->pdev->dev, "invalid hw address, using random\n");
164 eth_hw_addr_random(bp->dev); 215 eth_hw_addr_random(bp->dev);
165} 216}
166 217
@@ -252,7 +303,6 @@ static void macb_handle_link_change(struct net_device *dev)
252 struct macb *bp = netdev_priv(dev); 303 struct macb *bp = netdev_priv(dev);
253 struct phy_device *phydev = bp->phy_dev; 304 struct phy_device *phydev = bp->phy_dev;
254 unsigned long flags; 305 unsigned long flags;
255
256 int status_change = 0; 306 int status_change = 0;
257 307
258 spin_lock_irqsave(&bp->lock, flags); 308 spin_lock_irqsave(&bp->lock, flags);
@@ -449,14 +499,14 @@ err_out:
449 499
450static void macb_update_stats(struct macb *bp) 500static void macb_update_stats(struct macb *bp)
451{ 501{
452 u32 __iomem *reg = bp->regs + MACB_PFR;
453 u32 *p = &bp->hw_stats.macb.rx_pause_frames; 502 u32 *p = &bp->hw_stats.macb.rx_pause_frames;
454 u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1; 503 u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
504 int offset = MACB_PFR;
455 505
456 WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4); 506 WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);
457 507
458 for(; p < end; p++, reg++) 508 for(; p < end; p++, offset += 4)
459 *p += readl_relaxed(reg); 509 *p += bp->macb_reg_readl(bp, offset);
460} 510}
461 511
462static int macb_halt_tx(struct macb *bp) 512static int macb_halt_tx(struct macb *bp)
@@ -1107,12 +1157,6 @@ static void macb_poll_controller(struct net_device *dev)
1107} 1157}
1108#endif 1158#endif
1109 1159
1110static inline unsigned int macb_count_tx_descriptors(struct macb *bp,
1111 unsigned int len)
1112{
1113 return (len + bp->max_tx_length - 1) / bp->max_tx_length;
1114}
1115
1116static unsigned int macb_tx_map(struct macb *bp, 1160static unsigned int macb_tx_map(struct macb *bp,
1117 struct macb_queue *queue, 1161 struct macb_queue *queue,
1118 struct sk_buff *skb) 1162 struct sk_buff *skb)
@@ -1263,11 +1307,11 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
1263 * socket buffer: skb fragments of jumbo frames may need to be 1307 * socket buffer: skb fragments of jumbo frames may need to be
1264 * split into many buffer descriptors. 1308 * split into many buffer descriptors.
1265 */ 1309 */
1266 count = macb_count_tx_descriptors(bp, skb_headlen(skb)); 1310 count = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length);
1267 nr_frags = skb_shinfo(skb)->nr_frags; 1311 nr_frags = skb_shinfo(skb)->nr_frags;
1268 for (f = 0; f < nr_frags; f++) { 1312 for (f = 0; f < nr_frags; f++) {
1269 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]); 1313 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
1270 count += macb_count_tx_descriptors(bp, frag_size); 1314 count += DIV_ROUND_UP(frag_size, bp->max_tx_length);
1271 } 1315 }
1272 1316
1273 spin_lock_irqsave(&bp->lock, flags); 1317 spin_lock_irqsave(&bp->lock, flags);
@@ -1603,7 +1647,6 @@ static u32 macb_dbw(struct macb *bp)
1603static void macb_configure_dma(struct macb *bp) 1647static void macb_configure_dma(struct macb *bp)
1604{ 1648{
1605 u32 dmacfg; 1649 u32 dmacfg;
1606 u32 tmp, ncr;
1607 1650
1608 if (macb_is_gem(bp)) { 1651 if (macb_is_gem(bp)) {
1609 dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L); 1652 dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
@@ -1613,22 +1656,11 @@ static void macb_configure_dma(struct macb *bp)
1613 dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L); 1656 dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
1614 dmacfg &= ~GEM_BIT(ENDIA_PKT); 1657 dmacfg &= ~GEM_BIT(ENDIA_PKT);
1615 1658
1616 /* Find the CPU endianness by using the loopback bit of net_ctrl 1659 if (bp->native_io)
1617 * register. save it first. When the CPU is in big endian we
1618 * need to program swaped mode for management descriptor access.
1619 */
1620 ncr = macb_readl(bp, NCR);
1621 __raw_writel(MACB_BIT(LLB), bp->regs + MACB_NCR);
1622 tmp = __raw_readl(bp->regs + MACB_NCR);
1623
1624 if (tmp == MACB_BIT(LLB))
1625 dmacfg &= ~GEM_BIT(ENDIA_DESC); 1660 dmacfg &= ~GEM_BIT(ENDIA_DESC);
1626 else 1661 else
1627 dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */ 1662 dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */
1628 1663
1629 /* Restore net_ctrl */
1630 macb_writel(bp, NCR, ncr);
1631
1632 if (bp->dev->features & NETIF_F_HW_CSUM) 1664 if (bp->dev->features & NETIF_F_HW_CSUM)
1633 dmacfg |= GEM_BIT(TXCOEN); 1665 dmacfg |= GEM_BIT(TXCOEN);
1634 else 1666 else
@@ -1897,19 +1929,19 @@ static int macb_change_mtu(struct net_device *dev, int new_mtu)
1897 1929
1898static void gem_update_stats(struct macb *bp) 1930static void gem_update_stats(struct macb *bp)
1899{ 1931{
1900 int i; 1932 unsigned int i;
1901 u32 *p = &bp->hw_stats.gem.tx_octets_31_0; 1933 u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
1902 1934
1903 for (i = 0; i < GEM_STATS_LEN; ++i, ++p) { 1935 for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
1904 u32 offset = gem_statistics[i].offset; 1936 u32 offset = gem_statistics[i].offset;
1905 u64 val = readl_relaxed(bp->regs + offset); 1937 u64 val = bp->macb_reg_readl(bp, offset);
1906 1938
1907 bp->ethtool_stats[i] += val; 1939 bp->ethtool_stats[i] += val;
1908 *p += val; 1940 *p += val;
1909 1941
1910 if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) { 1942 if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) {
1911 /* Add GEM_OCTTXH, GEM_OCTRXH */ 1943 /* Add GEM_OCTTXH, GEM_OCTRXH */
1912 val = readl_relaxed(bp->regs + offset + 4); 1944 val = bp->macb_reg_readl(bp, offset + 4);
1913 bp->ethtool_stats[i] += ((u64)val) << 32; 1945 bp->ethtool_stats[i] += ((u64)val) << 32;
1914 *(++p) += val; 1946 *(++p) += val;
1915 } 1947 }
@@ -1976,7 +2008,7 @@ static int gem_get_sset_count(struct net_device *dev, int sset)
1976 2008
1977static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p) 2009static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
1978{ 2010{
1979 int i; 2011 unsigned int i;
1980 2012
1981 switch (sset) { 2013 switch (sset) {
1982 case ETH_SS_STATS: 2014 case ETH_SS_STATS:
@@ -2190,7 +2222,7 @@ static void macb_configure_caps(struct macb *bp, const struct macb_config *dt_co
2190 if (dt_conf) 2222 if (dt_conf)
2191 bp->caps = dt_conf->caps; 2223 bp->caps = dt_conf->caps;
2192 2224
2193 if (macb_is_gem_hw(bp->regs)) { 2225 if (hw_is_gem(bp->regs, bp->native_io)) {
2194 bp->caps |= MACB_CAPS_MACB_IS_GEM; 2226 bp->caps |= MACB_CAPS_MACB_IS_GEM;
2195 2227
2196 dcfg = gem_readl(bp, DCFG1); 2228 dcfg = gem_readl(bp, DCFG1);
@@ -2201,10 +2233,11 @@ static void macb_configure_caps(struct macb *bp, const struct macb_config *dt_co
2201 bp->caps |= MACB_CAPS_FIFO_MODE; 2233 bp->caps |= MACB_CAPS_FIFO_MODE;
2202 } 2234 }
2203 2235
2204 netdev_dbg(bp->dev, "Cadence caps 0x%08x\n", bp->caps); 2236 dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps);
2205} 2237}
2206 2238
2207static void macb_probe_queues(void __iomem *mem, 2239static void macb_probe_queues(void __iomem *mem,
2240 bool native_io,
2208 unsigned int *queue_mask, 2241 unsigned int *queue_mask,
2209 unsigned int *num_queues) 2242 unsigned int *num_queues)
2210{ 2243{
@@ -2219,7 +2252,7 @@ static void macb_probe_queues(void __iomem *mem,
2219 * we are early in the probe process and don't have the 2252 * we are early in the probe process and don't have the
2220 * MACB_CAPS_MACB_IS_GEM flag positioned 2253 * MACB_CAPS_MACB_IS_GEM flag positioned
2221 */ 2254 */
2222 if (!macb_is_gem_hw(mem)) 2255 if (!hw_is_gem(mem, native_io))
2223 return; 2256 return;
2224 2257
2225 /* bit 0 is never set but queue 0 always exists */ 2258 /* bit 0 is never set but queue 0 always exists */
@@ -2786,6 +2819,7 @@ static int macb_probe(struct platform_device *pdev)
2786 struct clk *pclk, *hclk, *tx_clk; 2819 struct clk *pclk, *hclk, *tx_clk;
2787 unsigned int queue_mask, num_queues; 2820 unsigned int queue_mask, num_queues;
2788 struct macb_platform_data *pdata; 2821 struct macb_platform_data *pdata;
2822 bool native_io;
2789 struct phy_device *phydev; 2823 struct phy_device *phydev;
2790 struct net_device *dev; 2824 struct net_device *dev;
2791 struct resource *regs; 2825 struct resource *regs;
@@ -2794,6 +2828,11 @@ static int macb_probe(struct platform_device *pdev)
2794 struct macb *bp; 2828 struct macb *bp;
2795 int err; 2829 int err;
2796 2830
2831 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2832 mem = devm_ioremap_resource(&pdev->dev, regs);
2833 if (IS_ERR(mem))
2834 return PTR_ERR(mem);
2835
2797 if (np) { 2836 if (np) {
2798 const struct of_device_id *match; 2837 const struct of_device_id *match;
2799 2838
@@ -2809,14 +2848,9 @@ static int macb_probe(struct platform_device *pdev)
2809 if (err) 2848 if (err)
2810 return err; 2849 return err;
2811 2850
2812 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2851 native_io = hw_is_native_io(mem);
2813 mem = devm_ioremap_resource(&pdev->dev, regs);
2814 if (IS_ERR(mem)) {
2815 err = PTR_ERR(mem);
2816 goto err_disable_clocks;
2817 }
2818 2852
2819 macb_probe_queues(mem, &queue_mask, &num_queues); 2853 macb_probe_queues(mem, native_io, &queue_mask, &num_queues);
2820 dev = alloc_etherdev_mq(sizeof(*bp), num_queues); 2854 dev = alloc_etherdev_mq(sizeof(*bp), num_queues);
2821 if (!dev) { 2855 if (!dev) {
2822 err = -ENOMEM; 2856 err = -ENOMEM;
@@ -2831,6 +2865,14 @@ static int macb_probe(struct platform_device *pdev)
2831 bp->pdev = pdev; 2865 bp->pdev = pdev;
2832 bp->dev = dev; 2866 bp->dev = dev;
2833 bp->regs = mem; 2867 bp->regs = mem;
2868 bp->native_io = native_io;
2869 if (native_io) {
2870 bp->macb_reg_readl = hw_readl_native;
2871 bp->macb_reg_writel = hw_writel_native;
2872 } else {
2873 bp->macb_reg_readl = hw_readl;
2874 bp->macb_reg_writel = hw_writel;
2875 }
2834 bp->num_queues = num_queues; 2876 bp->num_queues = num_queues;
2835 bp->queue_mask = queue_mask; 2877 bp->queue_mask = queue_mask;
2836 if (macb_config) 2878 if (macb_config)
@@ -2838,9 +2880,8 @@ static int macb_probe(struct platform_device *pdev)
2838 bp->pclk = pclk; 2880 bp->pclk = pclk;
2839 bp->hclk = hclk; 2881 bp->hclk = hclk;
2840 bp->tx_clk = tx_clk; 2882 bp->tx_clk = tx_clk;
2841 if (macb_config->jumbo_max_len) { 2883 if (macb_config)
2842 bp->jumbo_max_len = macb_config->jumbo_max_len; 2884 bp->jumbo_max_len = macb_config->jumbo_max_len;
2843 }
2844 2885
2845 spin_lock_init(&bp->lock); 2886 spin_lock_init(&bp->lock);
2846 2887
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index d74655993d4b..1895b6b2addd 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -429,18 +429,12 @@
429 | GEM_BF(name, value)) 429 | GEM_BF(name, value))
430 430
431/* Register access macros */ 431/* Register access macros */
432#define macb_readl(port,reg) \ 432#define macb_readl(port, reg) (port)->macb_reg_readl((port), MACB_##reg)
433 readl_relaxed((port)->regs + MACB_##reg) 433#define macb_writel(port, reg, value) (port)->macb_reg_writel((port), MACB_##reg, (value))
434#define macb_writel(port,reg,value) \ 434#define gem_readl(port, reg) (port)->macb_reg_readl((port), GEM_##reg)
435 writel_relaxed((value), (port)->regs + MACB_##reg) 435#define gem_writel(port, reg, value) (port)->macb_reg_writel((port), GEM_##reg, (value))
436#define gem_readl(port, reg) \ 436#define queue_readl(queue, reg) (queue)->bp->macb_reg_readl((queue)->bp, (queue)->reg)
437 readl_relaxed((port)->regs + GEM_##reg) 437#define queue_writel(queue, reg, value) (queue)->bp->macb_reg_writel((queue)->bp, (queue)->reg, (value))
438#define gem_writel(port, reg, value) \
439 writel_relaxed((value), (port)->regs + GEM_##reg)
440#define queue_readl(queue, reg) \
441 readl_relaxed((queue)->bp->regs + (queue)->reg)
442#define queue_writel(queue, reg, value) \
443 writel_relaxed((value), (queue)->bp->regs + (queue)->reg)
444 438
445/* Conditional GEM/MACB macros. These perform the operation to the correct 439/* Conditional GEM/MACB macros. These perform the operation to the correct
446 * register dependent on whether the device is a GEM or a MACB. For registers 440 * register dependent on whether the device is a GEM or a MACB. For registers
@@ -785,6 +779,11 @@ struct macb_queue {
785 779
786struct macb { 780struct macb {
787 void __iomem *regs; 781 void __iomem *regs;
782 bool native_io;
783
784 /* hardware IO accessors */
785 u32 (*macb_reg_readl)(struct macb *bp, int offset);
786 void (*macb_reg_writel)(struct macb *bp, int offset, u32 value);
788 787
789 unsigned int rx_tail; 788 unsigned int rx_tail;
790 unsigned int rx_prepared_head; 789 unsigned int rx_prepared_head;
@@ -817,9 +816,9 @@ struct macb {
817 816
818 struct mii_bus *mii_bus; 817 struct mii_bus *mii_bus;
819 struct phy_device *phy_dev; 818 struct phy_device *phy_dev;
820 unsigned int link; 819 int link;
821 unsigned int speed; 820 int speed;
822 unsigned int duplex; 821 int duplex;
823 822
824 u32 caps; 823 u32 caps;
825 unsigned int dma_burst_length; 824 unsigned int dma_burst_length;
@@ -843,9 +842,4 @@ static inline bool macb_is_gem(struct macb *bp)
843 return !!(bp->caps & MACB_CAPS_MACB_IS_GEM); 842 return !!(bp->caps & MACB_CAPS_MACB_IS_GEM);
844} 843}
845 844
846static inline bool macb_is_gem_hw(void __iomem *addr)
847{
848 return !!(MACB_BFEXT(IDNUM, readl_relaxed(addr + MACB_MID)) >= 0x2);
849}
850
851#endif /* _MACB_H */ 845#endif /* _MACB_H */
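
The macb hunks above replace the readl_relaxed/writel_relaxed macros with per-device accessor function pointers chosen once in macb_probe(), so boards whose GEM/MACB block wants CPU-native access and boards that need the relaxed little-endian accessors share one register-access path. A minimal user-space sketch of the same dispatch pattern, compiled stand-alone; only the macb_reg_readl/macb_reg_writel shape and the native_io flag come from the patch, the rest (fake_macb, the array standing in for bp->regs, the byte-swap choice) is illustrative:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdbool.h>

    /* Stand-in for the MMIO block; in the driver this is the ioremapped bp->regs. */
    struct fake_macb {
            uint32_t regs[4];
            bool native_io;
            /* same shape as the patch's bp->macb_reg_readl / bp->macb_reg_writel */
            uint32_t (*reg_readl)(struct fake_macb *bp, int offset);
            void (*reg_writel)(struct fake_macb *bp, int offset, uint32_t value);
    };

    /* "Native" accessors, standing in for hw_readl_native/hw_writel_native. */
    static uint32_t rd_native(struct fake_macb *bp, int off) { return bp->regs[off]; }
    static void wr_native(struct fake_macb *bp, int off, uint32_t v) { bp->regs[off] = v; }

    /* Byte-swapping accessors, standing in for hw_readl/hw_writel (readl_relaxed). */
    static uint32_t rd_le(struct fake_macb *bp, int off) { return __builtin_bswap32(bp->regs[off]); }
    static void wr_le(struct fake_macb *bp, int off, uint32_t v) { bp->regs[off] = __builtin_bswap32(v); }

    int main(void)
    {
            struct fake_macb bp = { .native_io = true };

            /* Probe-time selection, mirroring the if (native_io) block in macb_probe(). */
            bp.reg_readl  = bp.native_io ? rd_native : rd_le;
            bp.reg_writel = bp.native_io ? wr_native : wr_le;

            bp.reg_writel(&bp, 0, 0x12345678);
            printf("reg0 = 0x%08x\n", (unsigned)bp.reg_readl(&bp, 0));
            return 0;
    }

The point of the indirection is that macb_readl()/gem_writel() and the queue macros stay one-liners while the endianness decision is made exactly once, at probe time, instead of per access.
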
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
index c4d6bbe9458d..02e23e6f1424 100644
--- a/drivers/net/ethernet/cavium/Kconfig
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -16,7 +16,6 @@ if NET_VENDOR_CAVIUM
16config THUNDER_NIC_PF 16config THUNDER_NIC_PF
17 tristate "Thunder Physical function driver" 17 tristate "Thunder Physical function driver"
18 depends on 64BIT 18 depends on 64BIT
19 default ARCH_THUNDER
20 select THUNDER_NIC_BGX 19 select THUNDER_NIC_BGX
21 ---help--- 20 ---help---
22 This driver supports Thunder's NIC physical function. 21 This driver supports Thunder's NIC physical function.
@@ -29,14 +28,12 @@ config THUNDER_NIC_PF
29config THUNDER_NIC_VF 28config THUNDER_NIC_VF
30 tristate "Thunder Virtual function driver" 29 tristate "Thunder Virtual function driver"
31 depends on 64BIT 30 depends on 64BIT
32 default ARCH_THUNDER
33 ---help--- 31 ---help---
34 This driver supports Thunder's NIC virtual function 32 This driver supports Thunder's NIC virtual function
35 33
36config THUNDER_NIC_BGX 34config THUNDER_NIC_BGX
37 tristate "Thunder MAC interface driver (BGX)" 35 tristate "Thunder MAC interface driver (BGX)"
38 depends on 64BIT 36 depends on 64BIT
39 default ARCH_THUNDER
40 ---help--- 37 ---help---
41 This driver supports programming and controlling of MAC 38 This driver supports programming and controlling of MAC
42 interface from NIC physical function driver. 39 interface from NIC physical function driver.
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index dda8a02b7322..8aee250904ec 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -125,6 +125,15 @@
125 */ 125 */
126#define NICPF_CLK_PER_INT_TICK 2 126#define NICPF_CLK_PER_INT_TICK 2
127 127
128/* Time to wait before we decide that a SQ is stuck.
129 *
130 * Since both pkt rx and tx notifications are done with same CQ,
131 * when packets are being received at very high rate (eg: L2 forwarding)
132 * then freeing transmitted skbs will be delayed and watchdog
133 * will kick in, resetting interface. Hence keeping this value high.
134 */
135#define NICVF_TX_TIMEOUT (50 * HZ)
136
128struct nicvf_cq_poll { 137struct nicvf_cq_poll {
129 u8 cq_idx; /* Completion queue index */ 138 u8 cq_idx; /* Completion queue index */
130 struct napi_struct napi; 139 struct napi_struct napi;
@@ -216,8 +225,9 @@ struct nicvf_drv_stats {
216 /* Tx */ 225 /* Tx */
217 u64 tx_frames_ok; 226 u64 tx_frames_ok;
218 u64 tx_drops; 227 u64 tx_drops;
219 u64 tx_busy;
220 u64 tx_tso; 228 u64 tx_tso;
229 u64 txq_stop;
230 u64 txq_wake;
221}; 231};
222 232
223struct nicvf { 233struct nicvf {
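
NICVF_TX_TIMEOUT and the txq_stop/txq_wake counters added above are consumed by the generic TX watchdog and by ethtool -S respectively. A hedged fragment (not buildable on its own) showing how such a timeout is normally wired; example_wire_watchdog is illustrative, the field and the constant are the ones the patch touches:

    #include <linux/netdevice.h>

    #define NICVF_TX_TIMEOUT (50 * HZ)      /* value introduced by this patch */

    /* The core TX watchdog (dev_watchdog) calls the driver's ndo_tx_timeout
     * handler if a started-but-stopped TX queue makes no progress for longer
     * than watchdog_timeo.  The comment in nic.h explains why 50*HZ was
     * chosen: TX and RX completions share one CQ, so TX reclaim can lag far
     * behind under heavy RX load. */
    static void example_wire_watchdog(struct net_device *netdev)
    {
            netdev->watchdog_timeo = NICVF_TX_TIMEOUT;
    }
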
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index 16bd2d772db9..a4228e664567 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -66,9 +66,10 @@ static const struct nicvf_stat nicvf_drv_stats[] = {
66 NICVF_DRV_STAT(rx_frames_jumbo), 66 NICVF_DRV_STAT(rx_frames_jumbo),
67 NICVF_DRV_STAT(rx_drops), 67 NICVF_DRV_STAT(rx_drops),
68 NICVF_DRV_STAT(tx_frames_ok), 68 NICVF_DRV_STAT(tx_frames_ok),
69 NICVF_DRV_STAT(tx_busy),
70 NICVF_DRV_STAT(tx_tso), 69 NICVF_DRV_STAT(tx_tso),
71 NICVF_DRV_STAT(tx_drops), 70 NICVF_DRV_STAT(tx_drops),
71 NICVF_DRV_STAT(txq_stop),
72 NICVF_DRV_STAT(txq_wake),
72}; 73};
73 74
74static const struct nicvf_stat nicvf_queue_stats[] = { 75static const struct nicvf_stat nicvf_queue_stats[] = {
@@ -126,6 +127,7 @@ static void nicvf_set_msglevel(struct net_device *netdev, u32 lvl)
126 127
127static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data) 128static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
128{ 129{
130 struct nicvf *nic = netdev_priv(netdev);
129 int stats, qidx; 131 int stats, qidx;
130 132
131 if (sset != ETH_SS_STATS) 133 if (sset != ETH_SS_STATS)
@@ -141,7 +143,7 @@ static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
141 data += ETH_GSTRING_LEN; 143 data += ETH_GSTRING_LEN;
142 } 144 }
143 145
144 for (qidx = 0; qidx < MAX_RCV_QUEUES_PER_QS; qidx++) { 146 for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {
145 for (stats = 0; stats < nicvf_n_queue_stats; stats++) { 147 for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
146 sprintf(data, "rxq%d: %s", qidx, 148 sprintf(data, "rxq%d: %s", qidx,
147 nicvf_queue_stats[stats].name); 149 nicvf_queue_stats[stats].name);
@@ -149,7 +151,7 @@ static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
149 } 151 }
150 } 152 }
151 153
152 for (qidx = 0; qidx < MAX_SND_QUEUES_PER_QS; qidx++) { 154 for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {
153 for (stats = 0; stats < nicvf_n_queue_stats; stats++) { 155 for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
154 sprintf(data, "txq%d: %s", qidx, 156 sprintf(data, "txq%d: %s", qidx,
155 nicvf_queue_stats[stats].name); 157 nicvf_queue_stats[stats].name);
@@ -170,12 +172,14 @@ static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
170 172
171static int nicvf_get_sset_count(struct net_device *netdev, int sset) 173static int nicvf_get_sset_count(struct net_device *netdev, int sset)
172{ 174{
175 struct nicvf *nic = netdev_priv(netdev);
176
173 if (sset != ETH_SS_STATS) 177 if (sset != ETH_SS_STATS)
174 return -EINVAL; 178 return -EINVAL;
175 179
176 return nicvf_n_hw_stats + nicvf_n_drv_stats + 180 return nicvf_n_hw_stats + nicvf_n_drv_stats +
177 (nicvf_n_queue_stats * 181 (nicvf_n_queue_stats *
178 (MAX_RCV_QUEUES_PER_QS + MAX_SND_QUEUES_PER_QS)) + 182 (nic->qs->rq_cnt + nic->qs->sq_cnt)) +
179 BGX_RX_STATS_COUNT + BGX_TX_STATS_COUNT; 183 BGX_RX_STATS_COUNT + BGX_TX_STATS_COUNT;
180} 184}
181 185
@@ -197,13 +201,13 @@ static void nicvf_get_ethtool_stats(struct net_device *netdev,
197 *(data++) = ((u64 *)&nic->drv_stats) 201 *(data++) = ((u64 *)&nic->drv_stats)
198 [nicvf_drv_stats[stat].index]; 202 [nicvf_drv_stats[stat].index];
199 203
200 for (qidx = 0; qidx < MAX_RCV_QUEUES_PER_QS; qidx++) { 204 for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {
201 for (stat = 0; stat < nicvf_n_queue_stats; stat++) 205 for (stat = 0; stat < nicvf_n_queue_stats; stat++)
202 *(data++) = ((u64 *)&nic->qs->rq[qidx].stats) 206 *(data++) = ((u64 *)&nic->qs->rq[qidx].stats)
203 [nicvf_queue_stats[stat].index]; 207 [nicvf_queue_stats[stat].index];
204 } 208 }
205 209
206 for (qidx = 0; qidx < MAX_SND_QUEUES_PER_QS; qidx++) { 210 for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {
207 for (stat = 0; stat < nicvf_n_queue_stats; stat++) 211 for (stat = 0; stat < nicvf_n_queue_stats; stat++)
208 *(data++) = ((u64 *)&nic->qs->sq[qidx].stats) 212 *(data++) = ((u64 *)&nic->qs->sq[qidx].stats)
209 [nicvf_queue_stats[stat].index]; 213 [nicvf_queue_stats[stat].index];
@@ -543,6 +547,7 @@ static int nicvf_set_channels(struct net_device *dev,
543{ 547{
544 struct nicvf *nic = netdev_priv(dev); 548 struct nicvf *nic = netdev_priv(dev);
545 int err = 0; 549 int err = 0;
550 bool if_up = netif_running(dev);
546 551
547 if (!channel->rx_count || !channel->tx_count) 552 if (!channel->rx_count || !channel->tx_count)
548 return -EINVAL; 553 return -EINVAL;
@@ -551,6 +556,9 @@ static int nicvf_set_channels(struct net_device *dev,
551 if (channel->tx_count > MAX_SND_QUEUES_PER_QS) 556 if (channel->tx_count > MAX_SND_QUEUES_PER_QS)
552 return -EINVAL; 557 return -EINVAL;
553 558
559 if (if_up)
560 nicvf_stop(dev);
561
554 nic->qs->rq_cnt = channel->rx_count; 562 nic->qs->rq_cnt = channel->rx_count;
555 nic->qs->sq_cnt = channel->tx_count; 563 nic->qs->sq_cnt = channel->tx_count;
556 nic->qs->cq_cnt = max(nic->qs->rq_cnt, nic->qs->sq_cnt); 564 nic->qs->cq_cnt = max(nic->qs->rq_cnt, nic->qs->sq_cnt);
@@ -559,11 +567,9 @@ static int nicvf_set_channels(struct net_device *dev,
559 if (err) 567 if (err)
560 return err; 568 return err;
561 569
562 if (!netif_running(dev)) 570 if (if_up)
563 return err; 571 nicvf_open(dev);
564 572
565 nicvf_stop(dev);
566 nicvf_open(dev);
567 netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n", 573 netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n",
568 nic->qs->sq_cnt, nic->qs->rq_cnt); 574 nic->qs->sq_cnt, nic->qs->rq_cnt);
569 575
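
nicvf_set_channels() now samples netif_running() once, brings the interface down before the queue counts are rewritten, and only then restarts it, instead of bouncing the interface after the fact. A sketch of that ordering, with example_open/example_stop standing in for nicvf_open/nicvf_stop; the fragment is a shape sketch, not a buildable unit:

    #include <linux/netdevice.h>
    #include <linux/ethtool.h>

    static int example_open(struct net_device *dev);    /* illustrative stubs */
    static int example_stop(struct net_device *dev);

    static int example_set_channels(struct net_device *dev,
                                    struct ethtool_channels *ch)
    {
            bool if_up = netif_running(dev);        /* snapshot once, up front */

            if (!ch->rx_count || !ch->tx_count)
                    return -EINVAL;

            if (if_up)
                    example_stop(dev);      /* quiesce before the counts change */

            /* ... update rq_cnt / sq_cnt / cq_cnt here, as the patch does ... */

            if (if_up)
                    return example_open(dev);       /* restart with the new counts */
            return 0;
    }
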
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 8b119a035b7e..3b90afb8c293 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -234,7 +234,7 @@ static void nicvf_handle_mbx_intr(struct nicvf *nic)
234 nic->duplex == DUPLEX_FULL ? 234 nic->duplex == DUPLEX_FULL ?
235 "Full duplex" : "Half duplex"); 235 "Full duplex" : "Half duplex");
236 netif_carrier_on(nic->netdev); 236 netif_carrier_on(nic->netdev);
237 netif_tx_wake_all_queues(nic->netdev); 237 netif_tx_start_all_queues(nic->netdev);
238 } else { 238 } else {
239 netdev_info(nic->netdev, "%s: Link is Down\n", 239 netdev_info(nic->netdev, "%s: Link is Down\n",
240 nic->netdev->name); 240 nic->netdev->name);
@@ -425,6 +425,7 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
425 if (skb) { 425 if (skb) {
426 prefetch(skb); 426 prefetch(skb);
427 dev_consume_skb_any(skb); 427 dev_consume_skb_any(skb);
428 sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL;
428 } 429 }
429} 430}
430 431
@@ -476,12 +477,13 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
476static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx, 477static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
477 struct napi_struct *napi, int budget) 478 struct napi_struct *napi, int budget)
478{ 479{
479 int processed_cqe, work_done = 0; 480 int processed_cqe, work_done = 0, tx_done = 0;
480 int cqe_count, cqe_head; 481 int cqe_count, cqe_head;
481 struct nicvf *nic = netdev_priv(netdev); 482 struct nicvf *nic = netdev_priv(netdev);
482 struct queue_set *qs = nic->qs; 483 struct queue_set *qs = nic->qs;
483 struct cmp_queue *cq = &qs->cq[cq_idx]; 484 struct cmp_queue *cq = &qs->cq[cq_idx];
484 struct cqe_rx_t *cq_desc; 485 struct cqe_rx_t *cq_desc;
486 struct netdev_queue *txq;
485 487
486 spin_lock_bh(&cq->lock); 488 spin_lock_bh(&cq->lock);
487loop: 489loop:
@@ -496,8 +498,8 @@ loop:
496 cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9; 498 cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9;
497 cqe_head &= 0xFFFF; 499 cqe_head &= 0xFFFF;
498 500
499 netdev_dbg(nic->netdev, "%s cqe_count %d cqe_head %d\n", 501 netdev_dbg(nic->netdev, "%s CQ%d cqe_count %d cqe_head %d\n",
500 __func__, cqe_count, cqe_head); 502 __func__, cq_idx, cqe_count, cqe_head);
501 while (processed_cqe < cqe_count) { 503 while (processed_cqe < cqe_count) {
502 /* Get the CQ descriptor */ 504 /* Get the CQ descriptor */
503 cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head); 505 cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head);
@@ -511,8 +513,8 @@ loop:
511 break; 513 break;
512 } 514 }
513 515
514 netdev_dbg(nic->netdev, "cq_desc->cqe_type %d\n", 516 netdev_dbg(nic->netdev, "CQ%d cq_desc->cqe_type %d\n",
515 cq_desc->cqe_type); 517 cq_idx, cq_desc->cqe_type);
516 switch (cq_desc->cqe_type) { 518 switch (cq_desc->cqe_type) {
517 case CQE_TYPE_RX: 519 case CQE_TYPE_RX:
518 nicvf_rcv_pkt_handler(netdev, napi, cq, 520 nicvf_rcv_pkt_handler(netdev, napi, cq,
@@ -522,6 +524,7 @@ loop:
522 case CQE_TYPE_SEND: 524 case CQE_TYPE_SEND:
523 nicvf_snd_pkt_handler(netdev, cq, 525 nicvf_snd_pkt_handler(netdev, cq,
524 (void *)cq_desc, CQE_TYPE_SEND); 526 (void *)cq_desc, CQE_TYPE_SEND);
527 tx_done++;
525 break; 528 break;
526 case CQE_TYPE_INVALID: 529 case CQE_TYPE_INVALID:
527 case CQE_TYPE_RX_SPLIT: 530 case CQE_TYPE_RX_SPLIT:
@@ -532,8 +535,9 @@ loop:
532 } 535 }
533 processed_cqe++; 536 processed_cqe++;
534 } 537 }
535 netdev_dbg(nic->netdev, "%s processed_cqe %d work_done %d budget %d\n", 538 netdev_dbg(nic->netdev,
536 __func__, processed_cqe, work_done, budget); 539 "%s CQ%d processed_cqe %d work_done %d budget %d\n",
540 __func__, cq_idx, processed_cqe, work_done, budget);
537 541
538 /* Ring doorbell to inform H/W to reuse processed CQEs */ 542 /* Ring doorbell to inform H/W to reuse processed CQEs */
539 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR, 543 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR,
@@ -543,6 +547,19 @@ loop:
543 goto loop; 547 goto loop;
544 548
545done: 549done:
550 /* Wakeup TXQ if its stopped earlier due to SQ full */
551 if (tx_done) {
552 txq = netdev_get_tx_queue(netdev, cq_idx);
553 if (netif_tx_queue_stopped(txq)) {
554 netif_tx_start_queue(txq);
555 nic->drv_stats.txq_wake++;
556 if (netif_msg_tx_err(nic))
557 netdev_warn(netdev,
558 "%s: Transmit queue wakeup SQ%d\n",
559 netdev->name, cq_idx);
560 }
561 }
562
546 spin_unlock_bh(&cq->lock); 563 spin_unlock_bh(&cq->lock);
547 return work_done; 564 return work_done;
548} 565}
@@ -554,15 +571,10 @@ static int nicvf_poll(struct napi_struct *napi, int budget)
554 struct net_device *netdev = napi->dev; 571 struct net_device *netdev = napi->dev;
555 struct nicvf *nic = netdev_priv(netdev); 572 struct nicvf *nic = netdev_priv(netdev);
556 struct nicvf_cq_poll *cq; 573 struct nicvf_cq_poll *cq;
557 struct netdev_queue *txq;
558 574
559 cq = container_of(napi, struct nicvf_cq_poll, napi); 575 cq = container_of(napi, struct nicvf_cq_poll, napi);
560 work_done = nicvf_cq_intr_handler(netdev, cq->cq_idx, napi, budget); 576 work_done = nicvf_cq_intr_handler(netdev, cq->cq_idx, napi, budget);
561 577
562 txq = netdev_get_tx_queue(netdev, cq->cq_idx);
563 if (netif_tx_queue_stopped(txq))
564 netif_tx_wake_queue(txq);
565
566 if (work_done < budget) { 578 if (work_done < budget) {
567 /* Slow packet rate, exit polling */ 579 /* Slow packet rate, exit polling */
568 napi_complete(napi); 580 napi_complete(napi);
@@ -833,9 +845,9 @@ static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev)
833 return NETDEV_TX_OK; 845 return NETDEV_TX_OK;
834 } 846 }
835 847
836 if (!nicvf_sq_append_skb(nic, skb) && !netif_tx_queue_stopped(txq)) { 848 if (!netif_tx_queue_stopped(txq) && !nicvf_sq_append_skb(nic, skb)) {
837 netif_tx_stop_queue(txq); 849 netif_tx_stop_queue(txq);
838 nic->drv_stats.tx_busy++; 850 nic->drv_stats.txq_stop++;
839 if (netif_msg_tx_err(nic)) 851 if (netif_msg_tx_err(nic))
840 netdev_warn(netdev, 852 netdev_warn(netdev,
841 "%s: Transmit ring full, stopping SQ%d\n", 853 "%s: Transmit ring full, stopping SQ%d\n",
@@ -859,7 +871,6 @@ int nicvf_stop(struct net_device *netdev)
859 nicvf_send_msg_to_pf(nic, &mbx); 871 nicvf_send_msg_to_pf(nic, &mbx);
860 872
861 netif_carrier_off(netdev); 873 netif_carrier_off(netdev);
862 netif_tx_disable(netdev);
863 874
864 /* Disable RBDR & QS error interrupts */ 875 /* Disable RBDR & QS error interrupts */
865 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) { 876 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
@@ -894,6 +905,8 @@ int nicvf_stop(struct net_device *netdev)
894 kfree(cq_poll); 905 kfree(cq_poll);
895 } 906 }
896 907
908 netif_tx_disable(netdev);
909
897 /* Free resources */ 910 /* Free resources */
898 nicvf_config_data_transfer(nic, false); 911 nicvf_config_data_transfer(nic, false);
899 912
@@ -988,6 +1001,9 @@ int nicvf_open(struct net_device *netdev)
988 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) 1001 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
989 nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx); 1002 nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);
990 1003
1004 nic->drv_stats.txq_stop = 0;
1005 nic->drv_stats.txq_wake = 0;
1006
991 netif_carrier_on(netdev); 1007 netif_carrier_on(netdev);
992 netif_tx_start_all_queues(netdev); 1008 netif_tx_start_all_queues(netdev);
993 1009
@@ -1278,6 +1294,7 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1278 netdev->hw_features = netdev->features; 1294 netdev->hw_features = netdev->features;
1279 1295
1280 netdev->netdev_ops = &nicvf_netdev_ops; 1296 netdev->netdev_ops = &nicvf_netdev_ops;
1297 netdev->watchdog_timeo = NICVF_TX_TIMEOUT;
1281 1298
1282 INIT_WORK(&nic->reset_task, nicvf_reset_task); 1299 INIT_WORK(&nic->reset_task, nicvf_reset_task);
1283 1300
@@ -1318,11 +1335,17 @@ static void nicvf_remove(struct pci_dev *pdev)
1318 pci_disable_device(pdev); 1335 pci_disable_device(pdev);
1319} 1336}
1320 1337
1338static void nicvf_shutdown(struct pci_dev *pdev)
1339{
1340 nicvf_remove(pdev);
1341}
1342
1321static struct pci_driver nicvf_driver = { 1343static struct pci_driver nicvf_driver = {
1322 .name = DRV_NAME, 1344 .name = DRV_NAME,
1323 .id_table = nicvf_id_table, 1345 .id_table = nicvf_id_table,
1324 .probe = nicvf_probe, 1346 .probe = nicvf_probe,
1325 .remove = nicvf_remove, 1347 .remove = nicvf_remove,
1348 .shutdown = nicvf_shutdown,
1326}; 1349};
1327 1350
1328static int __init nicvf_init_module(void) 1351static int __init nicvf_init_module(void)
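
The nicvf_main.c hunks move the TX-queue wake-up out of nicvf_poll() and into the CQ handler, gated on CQE_TYPE_SEND completions actually having been processed, while the xmit path now checks for a stopped queue before trying to append. A hedged sketch of both halves of that stop/wake handshake; hw_ring_has_room and the example_* wrappers are illustrative, while the netif_tx_* helpers and the txq_stop/txq_wake counters are the ones used by the patch. What the driver returns after stopping is not shown in the hunk above; NETDEV_TX_BUSY here is just the conventional choice:

    #include <linux/netdevice.h>

    static bool hw_ring_has_room(struct sk_buff *skb);      /* illustrative stub */

    /* Producer side: stop the queue as soon as the SQ cannot take the packet,
     * and count the event, mirroring the reordered check in nicvf_xmit(). */
    static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *netdev,
                                    u64 *txq_stop)
    {
            struct netdev_queue *txq =
                    netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));

            if (!netif_tx_queue_stopped(txq) && !hw_ring_has_room(skb)) {
                    netif_tx_stop_queue(txq);
                    (*txq_stop)++;
                    return NETDEV_TX_BUSY;
            }

            /* ... append the skb to the SQ and ring the doorbell ... */
            return NETDEV_TX_OK;
    }

    /* Completion side: wake only when SEND completions were actually seen on
     * this CQ and the matching queue is stopped, mirroring the new block at
     * the end of nicvf_cq_intr_handler(). */
    static void example_tx_complete(struct net_device *netdev, int cq_idx,
                                    int tx_done, u64 *txq_wake)
    {
            struct netdev_queue *txq = netdev_get_tx_queue(netdev, cq_idx);

            if (tx_done && netif_tx_queue_stopped(txq)) {
                    netif_tx_start_queue(txq);
                    (*txq_wake)++;
            }
    }
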
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index d69d228d11a0..ca4240aa6d15 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -103,9 +103,11 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
103 103
104 /* Allocate a new page */ 104 /* Allocate a new page */
105 if (!nic->rb_page) { 105 if (!nic->rb_page) {
106 nic->rb_page = alloc_pages(gfp | __GFP_COMP, order); 106 nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
107 order);
107 if (!nic->rb_page) { 108 if (!nic->rb_page) {
108 netdev_err(nic->netdev, "Failed to allocate new rcv buffer\n"); 109 netdev_err(nic->netdev,
110 "Failed to allocate new rcv buffer\n");
109 return -ENOMEM; 111 return -ENOMEM;
110 } 112 }
111 nic->rb_page_offset = 0; 113 nic->rb_page_offset = 0;
@@ -382,7 +384,8 @@ static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
382 return; 384 return;
383 385
384 if (sq->tso_hdrs) 386 if (sq->tso_hdrs)
385 dma_free_coherent(&nic->pdev->dev, sq->dmem.q_len, 387 dma_free_coherent(&nic->pdev->dev,
388 sq->dmem.q_len * TSO_HEADER_SIZE,
386 sq->tso_hdrs, sq->tso_hdrs_phys); 389 sq->tso_hdrs, sq->tso_hdrs_phys);
387 390
388 kfree(sq->skbuff); 391 kfree(sq->skbuff);
@@ -863,10 +866,11 @@ void nicvf_sq_free_used_descs(struct net_device *netdev, struct snd_queue *sq,
863 continue; 866 continue;
864 } 867 }
865 skb = (struct sk_buff *)sq->skbuff[sq->head]; 868 skb = (struct sk_buff *)sq->skbuff[sq->head];
869 if (skb)
870 dev_kfree_skb_any(skb);
866 atomic64_add(1, (atomic64_t *)&netdev->stats.tx_packets); 871 atomic64_add(1, (atomic64_t *)&netdev->stats.tx_packets);
867 atomic64_add(hdr->tot_len, 872 atomic64_add(hdr->tot_len,
868 (atomic64_t *)&netdev->stats.tx_bytes); 873 (atomic64_t *)&netdev->stats.tx_bytes);
869 dev_kfree_skb_any(skb);
870 nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1); 874 nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
871 } 875 }
872} 876}
@@ -992,7 +996,7 @@ static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
992 996
993 memset(gather, 0, SND_QUEUE_DESC_SIZE); 997 memset(gather, 0, SND_QUEUE_DESC_SIZE);
994 gather->subdesc_type = SQ_DESC_TYPE_GATHER; 998 gather->subdesc_type = SQ_DESC_TYPE_GATHER;
995 gather->ld_type = NIC_SEND_LD_TYPE_E_LDWB; 999 gather->ld_type = NIC_SEND_LD_TYPE_E_LDD;
996 gather->size = size; 1000 gather->size = size;
997 gather->addr = data; 1001 gather->addr = data;
998} 1002}
@@ -1048,7 +1052,7 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
1048 } 1052 }
1049 nicvf_sq_add_hdr_subdesc(sq, hdr_qentry, 1053 nicvf_sq_add_hdr_subdesc(sq, hdr_qentry,
1050 seg_subdescs - 1, skb, seg_len); 1054 seg_subdescs - 1, skb, seg_len);
1051 sq->skbuff[hdr_qentry] = 0; 1055 sq->skbuff[hdr_qentry] = (u64)NULL;
1052 qentry = nicvf_get_nxt_sqentry(sq, qentry); 1056 qentry = nicvf_get_nxt_sqentry(sq, qentry);
1053 1057
1054 desc_cnt += seg_subdescs; 1058 desc_cnt += seg_subdescs;
@@ -1062,6 +1066,7 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
1062 /* Inform HW to xmit all TSO segments */ 1066 /* Inform HW to xmit all TSO segments */
1063 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, 1067 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
1064 skb_get_queue_mapping(skb), desc_cnt); 1068 skb_get_queue_mapping(skb), desc_cnt);
1069 nic->drv_stats.tx_tso++;
1065 return 1; 1070 return 1;
1066} 1071}
1067 1072
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index 8341bdf755d1..f0937b7bfe9f 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -62,7 +62,7 @@
62#define SND_QUEUE_CNT 8 62#define SND_QUEUE_CNT 8
63#define CMP_QUEUE_CNT 8 /* Max of RCV and SND qcount */ 63#define CMP_QUEUE_CNT 8 /* Max of RCV and SND qcount */
64 64
65#define SND_QSIZE SND_QUEUE_SIZE4 65#define SND_QSIZE SND_QUEUE_SIZE2
66#define SND_QUEUE_LEN (1ULL << (SND_QSIZE + 10)) 66#define SND_QUEUE_LEN (1ULL << (SND_QSIZE + 10))
67#define MAX_SND_QUEUE_LEN (1ULL << (SND_QUEUE_SIZE6 + 10)) 67#define MAX_SND_QUEUE_LEN (1ULL << (SND_QUEUE_SIZE6 + 10))
68#define SND_QUEUE_THRESH 2ULL 68#define SND_QUEUE_THRESH 2ULL
@@ -70,7 +70,10 @@
70/* Since timestamp not enabled, otherwise 2 */ 70/* Since timestamp not enabled, otherwise 2 */
71#define MAX_CQE_PER_PKT_XMIT 1 71#define MAX_CQE_PER_PKT_XMIT 1
72 72
73#define CMP_QSIZE CMP_QUEUE_SIZE4 73/* Keep CQ and SQ sizes same, if timestamping
74 * is enabled this equation will change.
75 */
76#define CMP_QSIZE CMP_QUEUE_SIZE2
74#define CMP_QUEUE_LEN (1ULL << (CMP_QSIZE + 10)) 77#define CMP_QUEUE_LEN (1ULL << (CMP_QSIZE + 10))
75#define CMP_QUEUE_CQE_THRESH 0 78#define CMP_QUEUE_CQE_THRESH 0
76#define CMP_QUEUE_TIMER_THRESH 220 /* 10usec */ 79#define CMP_QUEUE_TIMER_THRESH 220 /* 10usec */
@@ -87,7 +90,12 @@
87 90
88#define MAX_CQES_FOR_TX ((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \ 91#define MAX_CQES_FOR_TX ((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \
89 MAX_CQE_PER_PKT_XMIT) 92 MAX_CQE_PER_PKT_XMIT)
90#define RQ_CQ_DROP ((CMP_QUEUE_LEN - MAX_CQES_FOR_TX) / 256) 93/* Calculate number of CQEs to reserve for all SQEs.
 94 * It's 1/256th level of CQ size.
95 * '+ 1' to account for pipelining
96 */
97#define RQ_CQ_DROP ((256 / (CMP_QUEUE_LEN / \
98 (CMP_QUEUE_LEN - MAX_CQES_FOR_TX))) + 1)
91 99
92/* Descriptor size in bytes */ 100/* Descriptor size in bytes */
93#define SND_QUEUE_DESC_SIZE 16 101#define SND_QUEUE_DESC_SIZE 16
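
The queues.h hunk shrinks both queue encodings to *_SIZE2 and rewrites RQ_CQ_DROP. A small stand-alone program that simply re-does the arithmetic, under the assumption (not visible in this hunk) that the SIZE2 encoding is the literal value 2, i.e. 1 << (2 + 10) = 4096 entries, and that MIN_SQ_DESC_PER_PKT_XMIT is 2 (header plus gather descriptor):

    #include <stdio.h>

    int main(void)
    {
            /* Assumed encodings -- the enum values are not shown in this hunk. */
            const unsigned long long SND_QSIZE = 2, CMP_QSIZE = 2;
            const unsigned long long MIN_SQ_DESC_PER_PKT_XMIT = 2;  /* hdr + gather */
            const unsigned long long MAX_CQE_PER_PKT_XMIT = 1;      /* no timestamping */

            unsigned long long snd_len = 1ULL << (SND_QSIZE + 10);  /* 4096 */
            unsigned long long cmp_len = 1ULL << (CMP_QSIZE + 10);  /* 4096 */
            unsigned long long cqes_for_tx =
                    (snd_len / MIN_SQ_DESC_PER_PKT_XMIT) * MAX_CQE_PER_PKT_XMIT;

            /* New formula: roughly how many 1/256-of-the-CQ units remain for RX
             * after reserving CQEs for TX completions, plus one for pipelining
             * (the "+ 1" from the patch's comment). */
            unsigned long long rq_cq_drop =
                    (256 / (cmp_len / (cmp_len - cqes_for_tx))) + 1;

            printf("SND_QUEUE_LEN=%llu CMP_QUEUE_LEN=%llu MAX_CQES_FOR_TX=%llu RQ_CQ_DROP=%llu\n",
                   snd_len, cmp_len, cqes_for_tx, rq_cq_drop);
            return 0;
    }

With these assumed sizes the program prints MAX_CQES_FOR_TX=2048 and RQ_CQ_DROP=129.
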
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 633ec05dfe05..b961a89dc626 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -673,7 +673,10 @@ static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
673 bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg); 673 bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg);
674 bgx_flush_dmac_addrs(bgx, lmacid); 674 bgx_flush_dmac_addrs(bgx, lmacid);
675 675
676 if (lmac->phydev) 676 if ((bgx->lmac_type != BGX_MODE_XFI) &&
677 (bgx->lmac_type != BGX_MODE_XLAUI) &&
678 (bgx->lmac_type != BGX_MODE_40G_KR) &&
679 (bgx->lmac_type != BGX_MODE_10G_KR) && lmac->phydev)
677 phy_disconnect(lmac->phydev); 680 phy_disconnect(lmac->phydev);
678 681
679 lmac->phydev = NULL; 682 lmac->phydev = NULL;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 484eb8c37489..c3c7db41819d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -952,16 +952,23 @@ static int devlog_show(struct seq_file *seq, void *v)
952 * eventually have to put a format interpreter in here ... 952 * eventually have to put a format interpreter in here ...
953 */ 953 */
954 seq_printf(seq, "%10d %15llu %8s %8s ", 954 seq_printf(seq, "%10d %15llu %8s %8s ",
955 e->seqno, e->timestamp, 955 be32_to_cpu(e->seqno),
956 be64_to_cpu(e->timestamp),
956 (e->level < ARRAY_SIZE(devlog_level_strings) 957 (e->level < ARRAY_SIZE(devlog_level_strings)
957 ? devlog_level_strings[e->level] 958 ? devlog_level_strings[e->level]
958 : "UNKNOWN"), 959 : "UNKNOWN"),
959 (e->facility < ARRAY_SIZE(devlog_facility_strings) 960 (e->facility < ARRAY_SIZE(devlog_facility_strings)
960 ? devlog_facility_strings[e->facility] 961 ? devlog_facility_strings[e->facility]
961 : "UNKNOWN")); 962 : "UNKNOWN"));
962 seq_printf(seq, e->fmt, e->params[0], e->params[1], 963 seq_printf(seq, e->fmt,
963 e->params[2], e->params[3], e->params[4], 964 be32_to_cpu(e->params[0]),
964 e->params[5], e->params[6], e->params[7]); 965 be32_to_cpu(e->params[1]),
966 be32_to_cpu(e->params[2]),
967 be32_to_cpu(e->params[3]),
968 be32_to_cpu(e->params[4]),
969 be32_to_cpu(e->params[5]),
970 be32_to_cpu(e->params[6]),
971 be32_to_cpu(e->params[7]));
965 } 972 }
966 return 0; 973 return 0;
967} 974}
@@ -1043,23 +1050,17 @@ static int devlog_open(struct inode *inode, struct file *file)
1043 return ret; 1050 return ret;
1044 } 1051 }
1045 1052
1046 /* Translate log multi-byte integral elements into host native format 1053 /* Find the earliest (lowest Sequence Number) log entry in the
1047 * and determine where the first entry in the log is. 1054 * circular Device Log.
1048 */ 1055 */
1049 for (fseqno = ~((u32)0), index = 0; index < dinfo->nentries; index++) { 1056 for (fseqno = ~((u32)0), index = 0; index < dinfo->nentries; index++) {
1050 struct fw_devlog_e *e = &dinfo->log[index]; 1057 struct fw_devlog_e *e = &dinfo->log[index];
1051 int i;
1052 __u32 seqno; 1058 __u32 seqno;
1053 1059
1054 if (e->timestamp == 0) 1060 if (e->timestamp == 0)
1055 continue; 1061 continue;
1056 1062
1057 e->timestamp = (__force __be64)be64_to_cpu(e->timestamp);
1058 seqno = be32_to_cpu(e->seqno); 1063 seqno = be32_to_cpu(e->seqno);
1059 for (i = 0; i < 8; i++)
1060 e->params[i] =
1061 (__force __be32)be32_to_cpu(e->params[i]);
1062
1063 if (seqno < fseqno) { 1064 if (seqno < fseqno) {
1064 fseqno = seqno; 1065 fseqno = seqno;
1065 dinfo->first = index; 1066 dinfo->first = index;
@@ -2331,10 +2332,11 @@ int t4_setup_debugfs(struct adapter *adap)
2331 EXT_MEM1_SIZE_G(size)); 2332 EXT_MEM1_SIZE_G(size));
2332 } 2333 }
2333 } else { 2334 } else {
2334 if (i & EXT_MEM_ENABLE_F) 2335 if (i & EXT_MEM_ENABLE_F) {
2335 size = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A); 2336 size = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A);
2336 add_debugfs_mem(adap, "mc", MEM_MC, 2337 add_debugfs_mem(adap, "mc", MEM_MC,
2337 EXT_MEM_SIZE_G(size)); 2338 EXT_MEM_SIZE_G(size));
2339 }
2338 } 2340 }
2339 2341
2340 de = debugfs_create_file_size("flash", S_IRUSR, adap->debugfs_root, adap, 2342 de = debugfs_create_file_size("flash", S_IRUSR, adap->debugfs_root, adap,
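
The cxgb4 devlog fix stops byte-swapping the firmware log in place at open time and instead converts the big-endian fields every time an entry is printed, so repeated reads of the debugfs file see consistent data. A stand-alone sketch of that convert-at-display pattern, using glibc's be32toh/be64toh as stand-ins for the kernel's be32_to_cpu/be64_to_cpu:

    #define _DEFAULT_SOURCE
    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    /* A log entry exactly as the firmware wrote it: every field big-endian. */
    struct fw_log_entry {
            uint32_t seqno;
            uint64_t timestamp;
            uint32_t params[8];
    };

    static void show_entry(const struct fw_log_entry *e)
    {
            /* Convert on the way out; the stored entry stays big-endian, so the
             * log can be displayed any number of times without corruption. */
            printf("%10u %15llu param0=%u\n",
                   be32toh(e->seqno),
                   (unsigned long long)be64toh(e->timestamp),
                   be32toh(e->params[0]));
    }

    int main(void)
    {
            struct fw_log_entry e = {
                    .seqno = htobe32(42),
                    .timestamp = htobe64(123456789ULL),
                    .params = { htobe32(7) },
            };
            show_entry(&e);
            return 0;
    }
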
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index da2004e2a741..918a8e42139b 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1170,7 +1170,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
1170 wq_work_done, 1170 wq_work_done,
1171 0 /* dont unmask intr */, 1171 0 /* dont unmask intr */,
1172 0 /* dont reset intr timer */); 1172 0 /* dont reset intr timer */);
1173 return rq_work_done; 1173 return budget;
1174 } 1174 }
1175 1175
1176 if (budget > 0) 1176 if (budget > 0)
@@ -1191,6 +1191,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
1191 0 /* don't reset intr timer */); 1191 0 /* don't reset intr timer */);
1192 1192
1193 err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf); 1193 err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
1194 enic_poll_unlock_napi(&enic->rq[cq_rq], napi);
1194 1195
1195 /* Buffer allocation failed. Stay in polling 1196 /* Buffer allocation failed. Stay in polling
1196 * mode so we can try to fill the ring again. 1197 * mode so we can try to fill the ring again.
@@ -1208,7 +1209,6 @@ static int enic_poll(struct napi_struct *napi, int budget)
1208 napi_complete(napi); 1209 napi_complete(napi);
1209 vnic_intr_unmask(&enic->intr[intr]); 1210 vnic_intr_unmask(&enic->intr[intr]);
1210 } 1211 }
1211 enic_poll_unlock_napi(&enic->rq[cq_rq], napi);
1212 1212
1213 return rq_work_done; 1213 return rq_work_done;
1214} 1214}
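
The enic hunks return the full budget from enic_poll() while the WQ is still busy (instead of rq_work_done) and release the busy-poll lock before the refill check. For context, a hedged sketch of the NAPI contract the first change restores; all example_* helpers are illustrative stubs:

    #include <linux/netdevice.h>

    static int example_service_rq(struct napi_struct *napi, int budget);   /* illustrative */
    static bool example_wq_still_busy(struct napi_struct *napi);           /* illustrative */
    static void example_unmask_irq(struct napi_struct *napi);              /* illustrative */

    static int example_poll(struct napi_struct *napi, int budget)
    {
            int work_done = example_service_rq(napi, budget);

            /* Still busy elsewhere (here: the WQ): return the full budget so the
             * core keeps polling, and never call napi_complete() in this case. */
            if (example_wq_still_busy(napi))
                    return budget;

            if (work_done < budget) {
                    /* Done for now: complete and re-enable the interrupt. */
                    napi_complete(napi);
                    example_unmask_irq(napi);
            }
            return work_done;
    }
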
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 2716e6f30d9a..00e3a6b6b822 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -620,6 +620,11 @@ enum be_if_flags {
620 BE_IF_FLAGS_VLAN_PROMISCUOUS |\ 620 BE_IF_FLAGS_VLAN_PROMISCUOUS |\
621 BE_IF_FLAGS_MCAST_PROMISCUOUS) 621 BE_IF_FLAGS_MCAST_PROMISCUOUS)
622 622
623#define BE_IF_EN_FLAGS (BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |\
624 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_UNTAGGED)
625
626#define BE_IF_ALL_FILT_FLAGS (BE_IF_EN_FLAGS | BE_IF_FLAGS_ALL_PROMISCUOUS)
627
623/* An RX interface is an object with one or more MAC addresses and 628/* An RX interface is an object with one or more MAC addresses and
624 * filtering capabilities. */ 629 * filtering capabilities. */
625struct be_cmd_req_if_create { 630struct be_cmd_req_if_create {
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 6f642426308c..6ca693b03f33 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -273,6 +273,10 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
273 if (ether_addr_equal(addr->sa_data, netdev->dev_addr)) 273 if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
274 return 0; 274 return 0;
275 275
276 /* if device is not running, copy MAC to netdev->dev_addr */
277 if (!netif_running(netdev))
278 goto done;
279
276 /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT 280 /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
277 * privilege or if PF did not provision the new MAC address. 281 * privilege or if PF did not provision the new MAC address.
278 * On BE3, this cmd will always fail if the VF doesn't have the 282 * On BE3, this cmd will always fail if the VF doesn't have the
@@ -307,9 +311,9 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
307 status = -EPERM; 311 status = -EPERM;
308 goto err; 312 goto err;
309 } 313 }
310 314done:
311 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 315 ether_addr_copy(netdev->dev_addr, addr->sa_data);
312 dev_info(dev, "MAC address changed to %pM\n", mac); 316 dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
313 return 0; 317 return 0;
314err: 318err:
315 dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data); 319 dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
@@ -2447,10 +2451,24 @@ static void be_eq_clean(struct be_eq_obj *eqo)
2447 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0); 2451 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
2448} 2452}
2449 2453
2450static void be_rx_cq_clean(struct be_rx_obj *rxo) 2454/* Free posted rx buffers that were not used */
2455static void be_rxq_clean(struct be_rx_obj *rxo)
2451{ 2456{
2452 struct be_rx_page_info *page_info;
2453 struct be_queue_info *rxq = &rxo->q; 2457 struct be_queue_info *rxq = &rxo->q;
2458 struct be_rx_page_info *page_info;
2459
2460 while (atomic_read(&rxq->used) > 0) {
2461 page_info = get_rx_page_info(rxo);
2462 put_page(page_info->page);
2463 memset(page_info, 0, sizeof(*page_info));
2464 }
2465 BUG_ON(atomic_read(&rxq->used));
2466 rxq->tail = 0;
2467 rxq->head = 0;
2468}
2469
2470static void be_rx_cq_clean(struct be_rx_obj *rxo)
2471{
2454 struct be_queue_info *rx_cq = &rxo->cq; 2472 struct be_queue_info *rx_cq = &rxo->cq;
2455 struct be_rx_compl_info *rxcp; 2473 struct be_rx_compl_info *rxcp;
2456 struct be_adapter *adapter = rxo->adapter; 2474 struct be_adapter *adapter = rxo->adapter;
@@ -2487,16 +2505,6 @@ static void be_rx_cq_clean(struct be_rx_obj *rxo)
2487 2505
2488 /* After cleanup, leave the CQ in unarmed state */ 2506 /* After cleanup, leave the CQ in unarmed state */
2489 be_cq_notify(adapter, rx_cq->id, false, 0); 2507 be_cq_notify(adapter, rx_cq->id, false, 0);
2490
2491 /* Then free posted rx buffers that were not used */
2492 while (atomic_read(&rxq->used) > 0) {
2493 page_info = get_rx_page_info(rxo);
2494 put_page(page_info->page);
2495 memset(page_info, 0, sizeof(*page_info));
2496 }
2497 BUG_ON(atomic_read(&rxq->used));
2498 rxq->tail = 0;
2499 rxq->head = 0;
2500} 2508}
2501 2509
2502static void be_tx_compl_clean(struct be_adapter *adapter) 2510static void be_tx_compl_clean(struct be_adapter *adapter)
@@ -2576,8 +2584,8 @@ static void be_evt_queues_destroy(struct be_adapter *adapter)
2576 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ); 2584 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
2577 napi_hash_del(&eqo->napi); 2585 napi_hash_del(&eqo->napi);
2578 netif_napi_del(&eqo->napi); 2586 netif_napi_del(&eqo->napi);
2587 free_cpumask_var(eqo->affinity_mask);
2579 } 2588 }
2580 free_cpumask_var(eqo->affinity_mask);
2581 be_queue_free(adapter, &eqo->q); 2589 be_queue_free(adapter, &eqo->q);
2582 } 2590 }
2583} 2591}
@@ -2594,13 +2602,7 @@ static int be_evt_queues_create(struct be_adapter *adapter)
2594 2602
2595 for_all_evt_queues(adapter, eqo, i) { 2603 for_all_evt_queues(adapter, eqo, i) {
2596 int numa_node = dev_to_node(&adapter->pdev->dev); 2604 int numa_node = dev_to_node(&adapter->pdev->dev);
2597 if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL)) 2605
2598 return -ENOMEM;
2599 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
2600 eqo->affinity_mask);
2601 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2602 BE_NAPI_WEIGHT);
2603 napi_hash_add(&eqo->napi);
2604 aic = &adapter->aic_obj[i]; 2606 aic = &adapter->aic_obj[i];
2605 eqo->adapter = adapter; 2607 eqo->adapter = adapter;
2606 eqo->idx = i; 2608 eqo->idx = i;
@@ -2616,6 +2618,14 @@ static int be_evt_queues_create(struct be_adapter *adapter)
2616 rc = be_cmd_eq_create(adapter, eqo); 2618 rc = be_cmd_eq_create(adapter, eqo);
2617 if (rc) 2619 if (rc)
2618 return rc; 2620 return rc;
2621
2622 if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
2623 return -ENOMEM;
2624 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
2625 eqo->affinity_mask);
2626 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2627 BE_NAPI_WEIGHT);
2628 napi_hash_add(&eqo->napi);
2619 } 2629 }
2620 return 0; 2630 return 0;
2621} 2631}
@@ -3354,13 +3364,54 @@ static void be_rx_qs_destroy(struct be_adapter *adapter)
3354 for_all_rx_queues(adapter, rxo, i) { 3364 for_all_rx_queues(adapter, rxo, i) {
3355 q = &rxo->q; 3365 q = &rxo->q;
3356 if (q->created) { 3366 if (q->created) {
3367 /* If RXQs are destroyed while in an "out of buffer"
3368 * state, there is a possibility of an HW stall on
3369 * Lancer. So, post 64 buffers to each queue to relieve
3370 * the "out of buffer" condition.
3371 * Make sure there's space in the RXQ before posting.
3372 */
3373 if (lancer_chip(adapter)) {
3374 be_rx_cq_clean(rxo);
3375 if (atomic_read(&q->used) == 0)
3376 be_post_rx_frags(rxo, GFP_KERNEL,
3377 MAX_RX_POST);
3378 }
3379
3357 be_cmd_rxq_destroy(adapter, q); 3380 be_cmd_rxq_destroy(adapter, q);
3358 be_rx_cq_clean(rxo); 3381 be_rx_cq_clean(rxo);
3382 be_rxq_clean(rxo);
3359 } 3383 }
3360 be_queue_free(adapter, q); 3384 be_queue_free(adapter, q);
3361 } 3385 }
3362} 3386}
3363 3387
3388static void be_disable_if_filters(struct be_adapter *adapter)
3389{
3390 be_cmd_pmac_del(adapter, adapter->if_handle,
3391 adapter->pmac_id[0], 0);
3392
3393 be_clear_uc_list(adapter);
3394
3395 /* The IFACE flags are enabled in the open path and cleared
3396 * in the close path. When a VF gets detached from the host and
3397 * assigned to a VM the following happens:
3398 * - VF's IFACE flags get cleared in the detach path
3399 * - IFACE create is issued by the VF in the attach path
3400 * Due to a bug in the BE3/Skyhawk-R FW
3401 * (Lancer FW doesn't have the bug), the IFACE capability flags
3402 * specified along with the IFACE create cmd issued by a VF are not
3403 * honoured by FW. As a consequence, if a *new* driver
3404 * (that enables/disables IFACE flags in open/close)
 3405 * is loaded in the host and an *old* driver is used by a VM/VF,
3406 * the IFACE gets created *without* the needed flags.
3407 * To avoid this, disable RX-filter flags only for Lancer.
3408 */
3409 if (lancer_chip(adapter)) {
3410 be_cmd_rx_filter(adapter, BE_IF_ALL_FILT_FLAGS, OFF);
3411 adapter->if_flags &= ~BE_IF_ALL_FILT_FLAGS;
3412 }
3413}
3414
3364static int be_close(struct net_device *netdev) 3415static int be_close(struct net_device *netdev)
3365{ 3416{
3366 struct be_adapter *adapter = netdev_priv(netdev); 3417 struct be_adapter *adapter = netdev_priv(netdev);
@@ -3373,6 +3424,8 @@ static int be_close(struct net_device *netdev)
3373 if (!(adapter->flags & BE_FLAGS_SETUP_DONE)) 3424 if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
3374 return 0; 3425 return 0;
3375 3426
3427 be_disable_if_filters(adapter);
3428
3376 be_roce_dev_close(adapter); 3429 be_roce_dev_close(adapter);
3377 3430
3378 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) { 3431 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
@@ -3392,7 +3445,6 @@ static int be_close(struct net_device *netdev)
3392 be_tx_compl_clean(adapter); 3445 be_tx_compl_clean(adapter);
3393 3446
3394 be_rx_qs_destroy(adapter); 3447 be_rx_qs_destroy(adapter);
3395 be_clear_uc_list(adapter);
3396 3448
3397 for_all_evt_queues(adapter, eqo, i) { 3449 for_all_evt_queues(adapter, eqo, i) {
3398 if (msix_enabled(adapter)) 3450 if (msix_enabled(adapter))
@@ -3477,6 +3529,31 @@ static int be_rx_qs_create(struct be_adapter *adapter)
3477 return 0; 3529 return 0;
3478} 3530}
3479 3531
3532static int be_enable_if_filters(struct be_adapter *adapter)
3533{
3534 int status;
3535
3536 status = be_cmd_rx_filter(adapter, BE_IF_EN_FLAGS, ON);
3537 if (status)
3538 return status;
3539
3540 /* For BE3 VFs, the PF programs the initial MAC address */
3541 if (!(BEx_chip(adapter) && be_virtfn(adapter))) {
3542 status = be_cmd_pmac_add(adapter, adapter->netdev->dev_addr,
3543 adapter->if_handle,
3544 &adapter->pmac_id[0], 0);
3545 if (status)
3546 return status;
3547 }
3548
3549 if (adapter->vlans_added)
3550 be_vid_config(adapter);
3551
3552 be_set_rx_mode(adapter->netdev);
3553
3554 return 0;
3555}
3556
3480static int be_open(struct net_device *netdev) 3557static int be_open(struct net_device *netdev)
3481{ 3558{
3482 struct be_adapter *adapter = netdev_priv(netdev); 3559 struct be_adapter *adapter = netdev_priv(netdev);
@@ -3490,6 +3567,10 @@ static int be_open(struct net_device *netdev)
3490 if (status) 3567 if (status)
3491 goto err; 3568 goto err;
3492 3569
3570 status = be_enable_if_filters(adapter);
3571 if (status)
3572 goto err;
3573
3493 status = be_irq_register(adapter); 3574 status = be_irq_register(adapter);
3494 if (status) 3575 if (status)
3495 goto err; 3576 goto err;
@@ -3686,16 +3767,6 @@ static void be_cancel_err_detection(struct be_adapter *adapter)
3686 } 3767 }
3687} 3768}
3688 3769
3689static void be_mac_clear(struct be_adapter *adapter)
3690{
3691 if (adapter->pmac_id) {
3692 be_cmd_pmac_del(adapter, adapter->if_handle,
3693 adapter->pmac_id[0], 0);
3694 kfree(adapter->pmac_id);
3695 adapter->pmac_id = NULL;
3696 }
3697}
3698
3699#ifdef CONFIG_BE2NET_VXLAN 3770#ifdef CONFIG_BE2NET_VXLAN
3700static void be_disable_vxlan_offloads(struct be_adapter *adapter) 3771static void be_disable_vxlan_offloads(struct be_adapter *adapter)
3701{ 3772{
@@ -3770,8 +3841,8 @@ static int be_clear(struct be_adapter *adapter)
3770#ifdef CONFIG_BE2NET_VXLAN 3841#ifdef CONFIG_BE2NET_VXLAN
3771 be_disable_vxlan_offloads(adapter); 3842 be_disable_vxlan_offloads(adapter);
3772#endif 3843#endif
3773 /* delete the primary mac along with the uc-mac list */ 3844 kfree(adapter->pmac_id);
3774 be_mac_clear(adapter); 3845 adapter->pmac_id = NULL;
3775 3846
3776 be_cmd_if_destroy(adapter, adapter->if_handle, 0); 3847 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
3777 3848
@@ -3782,25 +3853,11 @@ static int be_clear(struct be_adapter *adapter)
3782 return 0; 3853 return 0;
3783} 3854}
3784 3855
3785static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
3786 u32 cap_flags, u32 vf)
3787{
3788 u32 en_flags;
3789
3790 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3791 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
3792 BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
3793
3794 en_flags &= cap_flags;
3795
3796 return be_cmd_if_create(adapter, cap_flags, en_flags, if_handle, vf);
3797}
3798
3799static int be_vfs_if_create(struct be_adapter *adapter) 3856static int be_vfs_if_create(struct be_adapter *adapter)
3800{ 3857{
3801 struct be_resources res = {0}; 3858 struct be_resources res = {0};
3859 u32 cap_flags, en_flags, vf;
3802 struct be_vf_cfg *vf_cfg; 3860 struct be_vf_cfg *vf_cfg;
3803 u32 cap_flags, vf;
3804 int status; 3861 int status;
3805 3862
3806 /* If a FW profile exists, then cap_flags are updated */ 3863 /* If a FW profile exists, then cap_flags are updated */
@@ -3821,8 +3878,12 @@ static int be_vfs_if_create(struct be_adapter *adapter)
3821 } 3878 }
3822 } 3879 }
3823 3880
3824 status = be_if_create(adapter, &vf_cfg->if_handle, 3881 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
3825 cap_flags, vf + 1); 3882 BE_IF_FLAGS_BROADCAST |
3883 BE_IF_FLAGS_MULTICAST |
3884 BE_IF_FLAGS_PASS_L3L4_ERRORS);
3885 status = be_cmd_if_create(adapter, cap_flags, en_flags,
3886 &vf_cfg->if_handle, vf + 1);
3826 if (status) 3887 if (status)
3827 return status; 3888 return status;
3828 } 3889 }
@@ -4194,15 +4255,8 @@ static int be_mac_setup(struct be_adapter *adapter)
4194 4255
4195 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN); 4256 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
4196 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN); 4257 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
4197 } else {
4198 /* Maybe the HW was reset; dev_addr must be re-programmed */
4199 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
4200 } 4258 }
4201 4259
4202 /* For BE3-R VFs, the PF programs the initial MAC address */
4203 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
4204 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
4205 &adapter->pmac_id[0], 0);
4206 return 0; 4260 return 0;
4207} 4261}
4208 4262
@@ -4342,6 +4396,7 @@ static int be_func_init(struct be_adapter *adapter)
4342static int be_setup(struct be_adapter *adapter) 4396static int be_setup(struct be_adapter *adapter)
4343{ 4397{
4344 struct device *dev = &adapter->pdev->dev; 4398 struct device *dev = &adapter->pdev->dev;
4399 u32 en_flags;
4345 int status; 4400 int status;
4346 4401
4347 status = be_func_init(adapter); 4402 status = be_func_init(adapter);
@@ -4364,8 +4419,11 @@ static int be_setup(struct be_adapter *adapter)
4364 if (status) 4419 if (status)
4365 goto err; 4420 goto err;
4366 4421
4367 status = be_if_create(adapter, &adapter->if_handle, 4422 /* will enable all the needed filter flags in be_open() */
4368 be_if_cap_flags(adapter), 0); 4423 en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
4424 en_flags = en_flags & be_if_cap_flags(adapter);
4425 status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
4426 &adapter->if_handle, 0);
4369 if (status) 4427 if (status)
4370 goto err; 4428 goto err;
4371 4429
@@ -4391,11 +4449,6 @@ static int be_setup(struct be_adapter *adapter)
4391 dev_err(dev, "Please upgrade firmware to version >= 4.0\n"); 4449 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
4392 } 4450 }
4393 4451
4394 if (adapter->vlans_added)
4395 be_vid_config(adapter);
4396
4397 be_set_rx_mode(adapter->netdev);
4398
4399 status = be_cmd_set_flow_control(adapter, adapter->tx_fc, 4452 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
4400 adapter->rx_fc); 4453 adapter->rx_fc);
4401 if (status) 4454 if (status)
@@ -5121,7 +5174,7 @@ static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
5121 struct device *dev = &adapter->pdev->dev; 5174 struct device *dev = &adapter->pdev->dev;
5122 int status; 5175 int status;
5123 5176
5124 if (lancer_chip(adapter) || BEx_chip(adapter)) 5177 if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
5125 return; 5178 return;
5126 5179
5127 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) { 5180 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
@@ -5168,7 +5221,7 @@ static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
5168{ 5221{
5169 struct be_adapter *adapter = netdev_priv(netdev); 5222 struct be_adapter *adapter = netdev_priv(netdev);
5170 5223
5171 if (lancer_chip(adapter) || BEx_chip(adapter)) 5224 if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
5172 return; 5225 return;
5173 5226
5174 if (adapter->vxlan_port != port) 5227 if (adapter->vxlan_port != port)
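
The be2net rework moves RX-filter programming (MAC, UC list, VIDs, RX mode) out of be_setup()/be_mac_setup() and into the open/close path: be_open() enables the filters once the RX queues exist, be_close() disables them before teardown, and on BE3/Skyhawk-R the capability flags are left alone to dodge the firmware bug described in the comment. A skeletal view of that ordering; the be_* names are from the driver and the patch, the bodies are elided, and the fragment is not meant to build:

    struct be_adapter;                                      /* driver-private; opaque here */

    static int be_rx_qs_create(struct be_adapter *adapter);         /* from the driver */
    static int be_enable_if_filters(struct be_adapter *adapter);    /* added by the patch */
    static void be_disable_if_filters(struct be_adapter *adapter);  /* added by the patch */
    static int be_irq_register(struct be_adapter *adapter);         /* from the driver */

    /* Open: queues first, then filters, then IRQs -- matching the new be_open(). */
    static int example_open(struct be_adapter *adapter)
    {
            int status;

            status = be_rx_qs_create(adapter);
            if (status)
                    return status;

            status = be_enable_if_filters(adapter); /* MAC, UC list, VIDs, RX mode */
            if (status)
                    return status;

            return be_irq_register(adapter);
    }

    /* Close: drop the filters before the queues and EQs are torn down. */
    static void example_close(struct be_adapter *adapter)
    {
            be_disable_if_filters(adapter);
            /* ... NAPI disable, TX completion clean, be_rx_qs_destroy() ... */
    }
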
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 1eee73cccdf5..99d33e2d35e6 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -562,6 +562,7 @@ struct fec_enet_private {
562}; 562};
563 563
564void fec_ptp_init(struct platform_device *pdev); 564void fec_ptp_init(struct platform_device *pdev);
565void fec_ptp_stop(struct platform_device *pdev);
565void fec_ptp_start_cyclecounter(struct net_device *ndev); 566void fec_ptp_start_cyclecounter(struct net_device *ndev);
566int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr); 567int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr);
567int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr); 568int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr);
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 1f89c59b4353..271bb5862346 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -24,6 +24,7 @@
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/kernel.h> 25#include <linux/kernel.h>
26#include <linux/string.h> 26#include <linux/string.h>
27#include <linux/pm_runtime.h>
27#include <linux/ptrace.h> 28#include <linux/ptrace.h>
28#include <linux/errno.h> 29#include <linux/errno.h>
29#include <linux/ioport.h> 30#include <linux/ioport.h>
@@ -77,6 +78,7 @@ static void fec_enet_itr_coal_init(struct net_device *ndev);
77#define FEC_ENET_RAEM_V 0x8 78#define FEC_ENET_RAEM_V 0x8
78#define FEC_ENET_RAFL_V 0x8 79#define FEC_ENET_RAFL_V 0x8
79#define FEC_ENET_OPD_V 0xFFF0 80#define FEC_ENET_OPD_V 0xFFF0
81#define FEC_MDIO_PM_TIMEOUT 100 /* ms */
80 82
81static struct platform_device_id fec_devtype[] = { 83static struct platform_device_id fec_devtype[] = {
82 { 84 {
@@ -1767,7 +1769,13 @@ static void fec_enet_adjust_link(struct net_device *ndev)
1767static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum) 1769static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
1768{ 1770{
1769 struct fec_enet_private *fep = bus->priv; 1771 struct fec_enet_private *fep = bus->priv;
1772 struct device *dev = &fep->pdev->dev;
1770 unsigned long time_left; 1773 unsigned long time_left;
1774 int ret = 0;
1775
1776 ret = pm_runtime_get_sync(dev);
1777 if (IS_ERR_VALUE(ret))
1778 return ret;
1771 1779
1772 fep->mii_timeout = 0; 1780 fep->mii_timeout = 0;
1773 init_completion(&fep->mdio_done); 1781 init_completion(&fep->mdio_done);
@@ -1783,18 +1791,30 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
1783 if (time_left == 0) { 1791 if (time_left == 0) {
1784 fep->mii_timeout = 1; 1792 fep->mii_timeout = 1;
1785 netdev_err(fep->netdev, "MDIO read timeout\n"); 1793 netdev_err(fep->netdev, "MDIO read timeout\n");
1786 return -ETIMEDOUT; 1794 ret = -ETIMEDOUT;
1795 goto out;
1787 } 1796 }
1788 1797
1789 /* return value */ 1798 ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
1790 return FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA)); 1799
1800out:
1801 pm_runtime_mark_last_busy(dev);
1802 pm_runtime_put_autosuspend(dev);
1803
1804 return ret;
1791} 1805}
1792 1806
1793static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum, 1807static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
1794 u16 value) 1808 u16 value)
1795{ 1809{
1796 struct fec_enet_private *fep = bus->priv; 1810 struct fec_enet_private *fep = bus->priv;
1811 struct device *dev = &fep->pdev->dev;
1797 unsigned long time_left; 1812 unsigned long time_left;
1813 int ret = 0;
1814
1815 ret = pm_runtime_get_sync(dev);
1816 if (IS_ERR_VALUE(ret))
1817 return ret;
1798 1818
1799 fep->mii_timeout = 0; 1819 fep->mii_timeout = 0;
1800 init_completion(&fep->mdio_done); 1820 init_completion(&fep->mdio_done);
@@ -1811,10 +1831,13 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
1811 if (time_left == 0) { 1831 if (time_left == 0) {
1812 fep->mii_timeout = 1; 1832 fep->mii_timeout = 1;
1813 netdev_err(fep->netdev, "MDIO write timeout\n"); 1833 netdev_err(fep->netdev, "MDIO write timeout\n");
1814 return -ETIMEDOUT; 1834 ret = -ETIMEDOUT;
1815 } 1835 }
1816 1836
1817 return 0; 1837 pm_runtime_mark_last_busy(dev);
1838 pm_runtime_put_autosuspend(dev);
1839
1840 return ret;
1818} 1841}
1819 1842
1820static int fec_enet_clk_enable(struct net_device *ndev, bool enable) 1843static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
@@ -1826,9 +1849,6 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
1826 ret = clk_prepare_enable(fep->clk_ahb); 1849 ret = clk_prepare_enable(fep->clk_ahb);
1827 if (ret) 1850 if (ret)
1828 return ret; 1851 return ret;
1829 ret = clk_prepare_enable(fep->clk_ipg);
1830 if (ret)
1831 goto failed_clk_ipg;
1832 if (fep->clk_enet_out) { 1852 if (fep->clk_enet_out) {
1833 ret = clk_prepare_enable(fep->clk_enet_out); 1853 ret = clk_prepare_enable(fep->clk_enet_out);
1834 if (ret) 1854 if (ret)
@@ -1852,7 +1872,6 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
1852 } 1872 }
1853 } else { 1873 } else {
1854 clk_disable_unprepare(fep->clk_ahb); 1874 clk_disable_unprepare(fep->clk_ahb);
1855 clk_disable_unprepare(fep->clk_ipg);
1856 if (fep->clk_enet_out) 1875 if (fep->clk_enet_out)
1857 clk_disable_unprepare(fep->clk_enet_out); 1876 clk_disable_unprepare(fep->clk_enet_out);
1858 if (fep->clk_ptp) { 1877 if (fep->clk_ptp) {
@@ -1874,8 +1893,6 @@ failed_clk_ptp:
1874 if (fep->clk_enet_out) 1893 if (fep->clk_enet_out)
1875 clk_disable_unprepare(fep->clk_enet_out); 1894 clk_disable_unprepare(fep->clk_enet_out);
1876failed_clk_enet_out: 1895failed_clk_enet_out:
1877 clk_disable_unprepare(fep->clk_ipg);
1878failed_clk_ipg:
1879 clk_disable_unprepare(fep->clk_ahb); 1896 clk_disable_unprepare(fep->clk_ahb);
1880 1897
1881 return ret; 1898 return ret;
@@ -2847,10 +2864,14 @@ fec_enet_open(struct net_device *ndev)
2847 struct fec_enet_private *fep = netdev_priv(ndev); 2864 struct fec_enet_private *fep = netdev_priv(ndev);
2848 int ret; 2865 int ret;
2849 2866
2867 ret = pm_runtime_get_sync(&fep->pdev->dev);
2868 if (IS_ERR_VALUE(ret))
2869 return ret;
2870
2850 pinctrl_pm_select_default_state(&fep->pdev->dev); 2871 pinctrl_pm_select_default_state(&fep->pdev->dev);
2851 ret = fec_enet_clk_enable(ndev, true); 2872 ret = fec_enet_clk_enable(ndev, true);
2852 if (ret) 2873 if (ret)
2853 return ret; 2874 goto clk_enable;
2854 2875
2855 /* I should reset the ring buffers here, but I don't yet know 2876 /* I should reset the ring buffers here, but I don't yet know
2856 * a simple way to do that. 2877 * a simple way to do that.
@@ -2881,6 +2902,9 @@ err_enet_mii_probe:
2881 fec_enet_free_buffers(ndev); 2902 fec_enet_free_buffers(ndev);
2882err_enet_alloc: 2903err_enet_alloc:
2883 fec_enet_clk_enable(ndev, false); 2904 fec_enet_clk_enable(ndev, false);
2905clk_enable:
2906 pm_runtime_mark_last_busy(&fep->pdev->dev);
2907 pm_runtime_put_autosuspend(&fep->pdev->dev);
2884 pinctrl_pm_select_sleep_state(&fep->pdev->dev); 2908 pinctrl_pm_select_sleep_state(&fep->pdev->dev);
2885 return ret; 2909 return ret;
2886} 2910}
@@ -2903,6 +2927,9 @@ fec_enet_close(struct net_device *ndev)
2903 2927
2904 fec_enet_clk_enable(ndev, false); 2928 fec_enet_clk_enable(ndev, false);
2905 pinctrl_pm_select_sleep_state(&fep->pdev->dev); 2929 pinctrl_pm_select_sleep_state(&fep->pdev->dev);
2930 pm_runtime_mark_last_busy(&fep->pdev->dev);
2931 pm_runtime_put_autosuspend(&fep->pdev->dev);
2932
2906 fec_enet_free_buffers(ndev); 2933 fec_enet_free_buffers(ndev);
2907 2934
2908 return 0; 2935 return 0;
@@ -3115,8 +3142,8 @@ static int fec_enet_init(struct net_device *ndev)
3115 fep->bufdesc_size; 3142 fep->bufdesc_size;
3116 3143
3117 /* Allocate memory for buffer descriptors. */ 3144 /* Allocate memory for buffer descriptors. */
3118 cbd_base = dma_alloc_coherent(NULL, bd_size, &bd_dma, 3145 cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma,
3119 GFP_KERNEL); 3146 GFP_KERNEL);
3120 if (!cbd_base) { 3147 if (!cbd_base) {
3121 return -ENOMEM; 3148 return -ENOMEM;
3122 } 3149 }
@@ -3388,6 +3415,10 @@ fec_probe(struct platform_device *pdev)
3388 if (ret) 3415 if (ret)
3389 goto failed_clk; 3416 goto failed_clk;
3390 3417
3418 ret = clk_prepare_enable(fep->clk_ipg);
3419 if (ret)
3420 goto failed_clk_ipg;
3421
3391 fep->reg_phy = devm_regulator_get(&pdev->dev, "phy"); 3422 fep->reg_phy = devm_regulator_get(&pdev->dev, "phy");
3392 if (!IS_ERR(fep->reg_phy)) { 3423 if (!IS_ERR(fep->reg_phy)) {
3393 ret = regulator_enable(fep->reg_phy); 3424 ret = regulator_enable(fep->reg_phy);
@@ -3400,6 +3431,12 @@ fec_probe(struct platform_device *pdev)
3400 fep->reg_phy = NULL; 3431 fep->reg_phy = NULL;
3401 } 3432 }
3402 3433
3434 pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT);
3435 pm_runtime_use_autosuspend(&pdev->dev);
3436 pm_runtime_get_noresume(&pdev->dev);
3437 pm_runtime_set_active(&pdev->dev);
3438 pm_runtime_enable(&pdev->dev);
3439
3403 fec_reset_phy(pdev); 3440 fec_reset_phy(pdev);
3404 3441
3405 if (fep->bufdesc_ex) 3442 if (fep->bufdesc_ex)
@@ -3447,6 +3484,10 @@ fec_probe(struct platform_device *pdev)
3447 3484
3448 fep->rx_copybreak = COPYBREAK_DEFAULT; 3485 fep->rx_copybreak = COPYBREAK_DEFAULT;
3449 INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work); 3486 INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);
3487
3488 pm_runtime_mark_last_busy(&pdev->dev);
3489 pm_runtime_put_autosuspend(&pdev->dev);
3490
3450 return 0; 3491 return 0;
3451 3492
3452failed_register: 3493failed_register:
@@ -3454,9 +3495,12 @@ failed_register:
3454failed_mii_init: 3495failed_mii_init:
3455failed_irq: 3496failed_irq:
3456failed_init: 3497failed_init:
3498 fec_ptp_stop(pdev);
3457 if (fep->reg_phy) 3499 if (fep->reg_phy)
3458 regulator_disable(fep->reg_phy); 3500 regulator_disable(fep->reg_phy);
3459failed_regulator: 3501failed_regulator:
3502 clk_disable_unprepare(fep->clk_ipg);
3503failed_clk_ipg:
3460 fec_enet_clk_enable(ndev, false); 3504 fec_enet_clk_enable(ndev, false);
3461failed_clk: 3505failed_clk:
3462failed_phy: 3506failed_phy:
@@ -3473,14 +3517,12 @@ fec_drv_remove(struct platform_device *pdev)
3473 struct net_device *ndev = platform_get_drvdata(pdev); 3517 struct net_device *ndev = platform_get_drvdata(pdev);
3474 struct fec_enet_private *fep = netdev_priv(ndev); 3518 struct fec_enet_private *fep = netdev_priv(ndev);
3475 3519
3476 cancel_delayed_work_sync(&fep->time_keep);
3477 cancel_work_sync(&fep->tx_timeout_work); 3520 cancel_work_sync(&fep->tx_timeout_work);
3521 fec_ptp_stop(pdev);
3478 unregister_netdev(ndev); 3522 unregister_netdev(ndev);
3479 fec_enet_mii_remove(fep); 3523 fec_enet_mii_remove(fep);
3480 if (fep->reg_phy) 3524 if (fep->reg_phy)
3481 regulator_disable(fep->reg_phy); 3525 regulator_disable(fep->reg_phy);
3482 if (fep->ptp_clock)
3483 ptp_clock_unregister(fep->ptp_clock);
3484 of_node_put(fep->phy_node); 3526 of_node_put(fep->phy_node);
3485 free_netdev(ndev); 3527 free_netdev(ndev);
3486 3528
@@ -3568,7 +3610,28 @@ failed_clk:
3568 return ret; 3610 return ret;
3569} 3611}
3570 3612
3571static SIMPLE_DEV_PM_OPS(fec_pm_ops, fec_suspend, fec_resume); 3613static int __maybe_unused fec_runtime_suspend(struct device *dev)
3614{
3615 struct net_device *ndev = dev_get_drvdata(dev);
3616 struct fec_enet_private *fep = netdev_priv(ndev);
3617
3618 clk_disable_unprepare(fep->clk_ipg);
3619
3620 return 0;
3621}
3622
3623static int __maybe_unused fec_runtime_resume(struct device *dev)
3624{
3625 struct net_device *ndev = dev_get_drvdata(dev);
3626 struct fec_enet_private *fep = netdev_priv(ndev);
3627
3628 return clk_prepare_enable(fep->clk_ipg);
3629}
3630
3631static const struct dev_pm_ops fec_pm_ops = {
3632 SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume)
3633 SET_RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL)
3634};
3572 3635
3573static struct platform_driver fec_driver = { 3636static struct platform_driver fec_driver = {
3574 .driver = { 3637 .driver = {
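
Taken together, the probe and PM hunks gate the IPG clock behind runtime PM with autosuspend: probe enables the clock, registers the autosuspend delay, and drops its reference once setup is done, while the new runtime callbacks simply toggle clk_ipg. A rough sketch of the same shape; MY_AUTOSUSPEND_MS and my_priv are placeholders, and it assumes platform_set_drvdata(pdev, priv) was called:

#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

#define MY_AUTOSUSPEND_MS 1000		/* placeholder delay */

struct my_priv {
	struct clk *clk_ipg;
};

static int my_runtime_suspend(struct device *dev)
{
	struct my_priv *priv = dev_get_drvdata(dev);

	clk_disable_unprepare(priv->clk_ipg);
	return 0;
}

static int my_runtime_resume(struct device *dev)
{
	struct my_priv *priv = dev_get_drvdata(dev);

	return clk_prepare_enable(priv->clk_ipg);
}

static int my_probe_pm_setup(struct platform_device *pdev, struct my_priv *priv)
{
	int ret;

	/* The clock must be on while probe touches the hardware. */
	ret = clk_prepare_enable(priv->clk_ipg);
	if (ret)
		return ret;

	pm_runtime_set_autosuspend_delay(&pdev->dev, MY_AUTOSUSPEND_MS);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);	/* probe holds a reference */
	pm_runtime_set_active(&pdev->dev);	/* clock is already running */
	pm_runtime_enable(&pdev->dev);

	/* ... rest of probe ... */

	/* Drop the probe reference; autosuspend kicks in after the delay. */
	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);
	return 0;
}

static const struct dev_pm_ops my_pm_ops = {
	SET_RUNTIME_PM_OPS(my_runtime_suspend, my_runtime_resume, NULL)
};
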
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index a15663ad7f5e..f457a23d0bfb 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -604,6 +604,16 @@ void fec_ptp_init(struct platform_device *pdev)
604 schedule_delayed_work(&fep->time_keep, HZ); 604 schedule_delayed_work(&fep->time_keep, HZ);
605} 605}
606 606
607void fec_ptp_stop(struct platform_device *pdev)
608{
609 struct net_device *ndev = platform_get_drvdata(pdev);
610 struct fec_enet_private *fep = netdev_priv(ndev);
611
612 cancel_delayed_work_sync(&fep->time_keep);
613 if (fep->ptp_clock)
614 ptp_clock_unregister(fep->ptp_clock);
615}
616
607/** 617/**
608 * fec_ptp_check_pps_event 618 * fec_ptp_check_pps_event
609 * @fep: the fec_enet_private structure handle 619 * @fep: the fec_enet_private structure handle
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 56316db6c5a6..cf8e54652df9 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -586,7 +586,8 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
586 frag = skb_shinfo(skb)->frags; 586 frag = skb_shinfo(skb)->frags;
587 while (nr_frags) { 587 while (nr_frags) {
588 CBDC_SC(bdp, 588 CBDC_SC(bdp,
589 BD_ENET_TX_STATS | BD_ENET_TX_LAST | BD_ENET_TX_TC); 589 BD_ENET_TX_STATS | BD_ENET_TX_INTR | BD_ENET_TX_LAST |
590 BD_ENET_TX_TC);
590 CBDS_SC(bdp, BD_ENET_TX_READY); 591 CBDS_SC(bdp, BD_ENET_TX_READY);
591 592
592 if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0) 593 if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
index b34214e2df5f..016743e355de 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
@@ -110,7 +110,7 @@ static int do_pd_setup(struct fs_enet_private *fep)
110} 110}
111 111
112#define FEC_NAPI_RX_EVENT_MSK (FEC_ENET_RXF | FEC_ENET_RXB) 112#define FEC_NAPI_RX_EVENT_MSK (FEC_ENET_RXF | FEC_ENET_RXB)
113#define FEC_NAPI_TX_EVENT_MSK (FEC_ENET_TXF | FEC_ENET_TXB) 113#define FEC_NAPI_TX_EVENT_MSK (FEC_ENET_TXF)
114#define FEC_RX_EVENT (FEC_ENET_RXF) 114#define FEC_RX_EVENT (FEC_ENET_RXF)
115#define FEC_TX_EVENT (FEC_ENET_TXF) 115#define FEC_TX_EVENT (FEC_ENET_TXF)
116#define FEC_ERR_EVENT_MSK (FEC_ENET_HBERR | FEC_ENET_BABR | \ 116#define FEC_ERR_EVENT_MSK (FEC_ENET_HBERR | FEC_ENET_BABR | \
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index ff875028fdff..10b3bbbbac8e 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -565,22 +565,6 @@ static void gfar_ints_enable(struct gfar_private *priv)
565 } 565 }
566} 566}
567 567
568static void lock_tx_qs(struct gfar_private *priv)
569{
570 int i;
571
572 for (i = 0; i < priv->num_tx_queues; i++)
573 spin_lock(&priv->tx_queue[i]->txlock);
574}
575
576static void unlock_tx_qs(struct gfar_private *priv)
577{
578 int i;
579
580 for (i = 0; i < priv->num_tx_queues; i++)
581 spin_unlock(&priv->tx_queue[i]->txlock);
582}
583
584static int gfar_alloc_tx_queues(struct gfar_private *priv) 568static int gfar_alloc_tx_queues(struct gfar_private *priv)
585{ 569{
586 int i; 570 int i;
@@ -1376,7 +1360,6 @@ static int gfar_probe(struct platform_device *ofdev)
1376 priv->dev = &ofdev->dev; 1360 priv->dev = &ofdev->dev;
1377 SET_NETDEV_DEV(dev, &ofdev->dev); 1361 SET_NETDEV_DEV(dev, &ofdev->dev);
1378 1362
1379 spin_lock_init(&priv->bflock);
1380 INIT_WORK(&priv->reset_task, gfar_reset_task); 1363 INIT_WORK(&priv->reset_task, gfar_reset_task);
1381 1364
1382 platform_set_drvdata(ofdev, priv); 1365 platform_set_drvdata(ofdev, priv);
@@ -1470,9 +1453,8 @@ static int gfar_probe(struct platform_device *ofdev)
1470 goto register_fail; 1453 goto register_fail;
1471 } 1454 }
1472 1455
1473 device_init_wakeup(&dev->dev, 1456 device_set_wakeup_capable(&dev->dev, priv->device_flags &
1474 priv->device_flags & 1457 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1475 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1476 1458
1477 /* fill out IRQ number and name fields */ 1459 /* fill out IRQ number and name fields */
1478 for (i = 0; i < priv->num_grps; i++) { 1460 for (i = 0; i < priv->num_grps; i++) {
@@ -1540,48 +1522,37 @@ static int gfar_suspend(struct device *dev)
1540 struct gfar_private *priv = dev_get_drvdata(dev); 1522 struct gfar_private *priv = dev_get_drvdata(dev);
1541 struct net_device *ndev = priv->ndev; 1523 struct net_device *ndev = priv->ndev;
1542 struct gfar __iomem *regs = priv->gfargrp[0].regs; 1524 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1543 unsigned long flags;
1544 u32 tempval; 1525 u32 tempval;
1545
1546 int magic_packet = priv->wol_en && 1526 int magic_packet = priv->wol_en &&
1547 (priv->device_flags & 1527 (priv->device_flags &
1548 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); 1528 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1549 1529
1530 if (!netif_running(ndev))
1531 return 0;
1532
1533 disable_napi(priv);
1534 netif_tx_lock(ndev);
1550 netif_device_detach(ndev); 1535 netif_device_detach(ndev);
1536 netif_tx_unlock(ndev);
1551 1537
1552 if (netif_running(ndev)) { 1538 gfar_halt(priv);
1553 1539
1554 local_irq_save(flags); 1540 if (magic_packet) {
1555 lock_tx_qs(priv); 1541 /* Enable interrupt on Magic Packet */
1542 gfar_write(&regs->imask, IMASK_MAG);
1556 1543
1557 gfar_halt_nodisable(priv); 1544 /* Enable Magic Packet mode */
1545 tempval = gfar_read(&regs->maccfg2);
1546 tempval |= MACCFG2_MPEN;
1547 gfar_write(&regs->maccfg2, tempval);
1558 1548
1559 /* Disable Tx, and Rx if wake-on-LAN is disabled. */ 1549 /* re-enable the Rx block */
1560 tempval = gfar_read(&regs->maccfg1); 1550 tempval = gfar_read(&regs->maccfg1);
1561 1551 tempval |= MACCFG1_RX_EN;
1562 tempval &= ~MACCFG1_TX_EN;
1563
1564 if (!magic_packet)
1565 tempval &= ~MACCFG1_RX_EN;
1566
1567 gfar_write(&regs->maccfg1, tempval); 1552 gfar_write(&regs->maccfg1, tempval);
1568 1553
1569 unlock_tx_qs(priv); 1554 } else {
1570 local_irq_restore(flags); 1555 phy_stop(priv->phydev);
1571
1572 disable_napi(priv);
1573
1574 if (magic_packet) {
1575 /* Enable interrupt on Magic Packet */
1576 gfar_write(&regs->imask, IMASK_MAG);
1577
1578 /* Enable Magic Packet mode */
1579 tempval = gfar_read(&regs->maccfg2);
1580 tempval |= MACCFG2_MPEN;
1581 gfar_write(&regs->maccfg2, tempval);
1582 } else {
1583 phy_stop(priv->phydev);
1584 }
1585 } 1556 }
1586 1557
1587 return 0; 1558 return 0;
@@ -1592,37 +1563,26 @@ static int gfar_resume(struct device *dev)
1592 struct gfar_private *priv = dev_get_drvdata(dev); 1563 struct gfar_private *priv = dev_get_drvdata(dev);
1593 struct net_device *ndev = priv->ndev; 1564 struct net_device *ndev = priv->ndev;
1594 struct gfar __iomem *regs = priv->gfargrp[0].regs; 1565 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1595 unsigned long flags;
1596 u32 tempval; 1566 u32 tempval;
1597 int magic_packet = priv->wol_en && 1567 int magic_packet = priv->wol_en &&
1598 (priv->device_flags & 1568 (priv->device_flags &
1599 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); 1569 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1600 1570
1601 if (!netif_running(ndev)) { 1571 if (!netif_running(ndev))
1602 netif_device_attach(ndev);
1603 return 0; 1572 return 0;
1604 }
1605 1573
1606 if (!magic_packet && priv->phydev) 1574 if (magic_packet) {
1575 /* Disable Magic Packet mode */
1576 tempval = gfar_read(&regs->maccfg2);
1577 tempval &= ~MACCFG2_MPEN;
1578 gfar_write(&regs->maccfg2, tempval);
1579 } else {
1607 phy_start(priv->phydev); 1580 phy_start(priv->phydev);
1608 1581 }
1609 /* Disable Magic Packet mode, in case something
1610 * else woke us up.
1611 */
1612 local_irq_save(flags);
1613 lock_tx_qs(priv);
1614
1615 tempval = gfar_read(&regs->maccfg2);
1616 tempval &= ~MACCFG2_MPEN;
1617 gfar_write(&regs->maccfg2, tempval);
1618 1582
1619 gfar_start(priv); 1583 gfar_start(priv);
1620 1584
1621 unlock_tx_qs(priv);
1622 local_irq_restore(flags);
1623
1624 netif_device_attach(ndev); 1585 netif_device_attach(ndev);
1625
1626 enable_napi(priv); 1586 enable_napi(priv);
1627 1587
1628 return 0; 1588 return 0;
@@ -2045,7 +2005,8 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
2045 /* Install our interrupt handlers for Error, 2005 /* Install our interrupt handlers for Error,
2046 * Transmit, and Receive 2006 * Transmit, and Receive
2047 */ 2007 */
2048 err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0, 2008 err = request_irq(gfar_irq(grp, ER)->irq, gfar_error,
2009 IRQF_NO_SUSPEND,
2049 gfar_irq(grp, ER)->name, grp); 2010 gfar_irq(grp, ER)->name, grp);
2050 if (err < 0) { 2011 if (err < 0) {
2051 netif_err(priv, intr, dev, "Can't get IRQ %d\n", 2012 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
@@ -2068,7 +2029,8 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
2068 goto rx_irq_fail; 2029 goto rx_irq_fail;
2069 } 2030 }
2070 } else { 2031 } else {
2071 err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0, 2032 err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt,
2033 IRQF_NO_SUSPEND,
2072 gfar_irq(grp, TX)->name, grp); 2034 gfar_irq(grp, TX)->name, grp);
2073 if (err < 0) { 2035 if (err < 0) {
2074 netif_err(priv, intr, dev, "Can't get IRQ %d\n", 2036 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
@@ -2140,6 +2102,11 @@ int startup_gfar(struct net_device *ndev)
2140 /* Start Rx/Tx DMA and enable the interrupts */ 2102 /* Start Rx/Tx DMA and enable the interrupts */
2141 gfar_start(priv); 2103 gfar_start(priv);
2142 2104
2105 /* force link state update after mac reset */
2106 priv->oldlink = 0;
2107 priv->oldspeed = 0;
2108 priv->oldduplex = -1;
2109
2143 phy_start(priv->phydev); 2110 phy_start(priv->phydev);
2144 2111
2145 enable_napi(priv); 2112 enable_napi(priv);
@@ -2169,8 +2136,6 @@ static int gfar_enet_open(struct net_device *dev)
2169 if (err) 2136 if (err)
2170 return err; 2137 return err;
2171 2138
2172 device_set_wakeup_enable(&dev->dev, priv->wol_en);
2173
2174 return err; 2139 return err;
2175} 2140}
2176 2141
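
The gianfar hunks rewrite suspend/resume around magic-packet wake-up: the device is detached under netif_tx_lock() instead of the removed per-queue txlock helpers, the MAC is halted once, and only the wake-on-LAN decision differs between the two branches. A generic skeleton of that shape, with the device-specific register pokes left as placeholders (the my_hw_* helpers are invented names, not this driver's API):

#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/phy.h>

struct my_priv {
	struct net_device *ndev;
	struct phy_device *phydev;
	bool wol_en;
};

static void my_hw_halt(struct my_priv *priv) { }			/* placeholder */
static void my_hw_start(struct my_priv *priv) { }			/* placeholder */
static void my_hw_enable_magic_wakeup(struct my_priv *priv) { }		/* placeholder */
static void my_hw_disable_magic_wakeup(struct my_priv *priv) { }	/* placeholder */

static int my_suspend(struct device *dev)
{
	struct my_priv *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;

	if (!netif_running(ndev))
		return 0;

	/* Stop new transmissions before detaching the device. */
	netif_tx_lock(ndev);
	netif_device_detach(ndev);
	netif_tx_unlock(ndev);

	my_hw_halt(priv);

	if (priv->wol_en)
		my_hw_enable_magic_wakeup(priv);	/* keep RX alive for the magic frame */
	else
		phy_stop(priv->phydev);			/* nothing can wake us, stop the PHY */

	return 0;
}

static int my_resume(struct device *dev)
{
	struct my_priv *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;

	if (!netif_running(ndev))
		return 0;

	if (priv->wol_en)
		my_hw_disable_magic_wakeup(priv);	/* in case something else woke us */
	else
		phy_start(priv->phydev);

	my_hw_start(priv);
	netif_device_attach(ndev);
	return 0;
}
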
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index daa1d37de642..5545e4103368 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -1145,9 +1145,6 @@ struct gfar_private {
1145 int oldduplex; 1145 int oldduplex;
1146 int oldlink; 1146 int oldlink;
1147 1147
1148 /* Bitfield update lock */
1149 spinlock_t bflock;
1150
1151 uint32_t msg_enable; 1148 uint32_t msg_enable;
1152 1149
1153 struct work_struct reset_task; 1150 struct work_struct reset_task;
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index fda12fb32ec7..5b90fcf96265 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -653,7 +653,6 @@ static void gfar_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
653static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 653static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
654{ 654{
655 struct gfar_private *priv = netdev_priv(dev); 655 struct gfar_private *priv = netdev_priv(dev);
656 unsigned long flags;
657 656
658 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) && 657 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
659 wol->wolopts != 0) 658 wol->wolopts != 0)
@@ -664,9 +663,7 @@ static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
664 663
665 device_set_wakeup_enable(&dev->dev, wol->wolopts & WAKE_MAGIC); 664 device_set_wakeup_enable(&dev->dev, wol->wolopts & WAKE_MAGIC);
666 665
667 spin_lock_irqsave(&priv->bflock, flags); 666 priv->wol_en = !!device_may_wakeup(&dev->dev);
668 priv->wol_en = !!device_may_wakeup(&dev->dev);
669 spin_unlock_irqrestore(&priv->bflock, flags);
670 667
671 return 0; 668 return 0;
672} 669}
@@ -903,27 +900,6 @@ static int gfar_check_filer_hardware(struct gfar_private *priv)
903 return 0; 900 return 0;
904} 901}
905 902
906static int gfar_comp_asc(const void *a, const void *b)
907{
908 return memcmp(a, b, 4);
909}
910
911static int gfar_comp_desc(const void *a, const void *b)
912{
913 return -memcmp(a, b, 4);
914}
915
916static void gfar_swap(void *a, void *b, int size)
917{
918 u32 *_a = a;
919 u32 *_b = b;
920
921 swap(_a[0], _b[0]);
922 swap(_a[1], _b[1]);
923 swap(_a[2], _b[2]);
924 swap(_a[3], _b[3]);
925}
926
927/* Write a mask to filer cache */ 903/* Write a mask to filer cache */
928static void gfar_set_mask(u32 mask, struct filer_table *tab) 904static void gfar_set_mask(u32 mask, struct filer_table *tab)
929{ 905{
@@ -1273,310 +1249,6 @@ static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
1273 return 0; 1249 return 0;
1274} 1250}
1275 1251
1276/* Copy size filer entries */
1277static void gfar_copy_filer_entries(struct gfar_filer_entry dst[0],
1278 struct gfar_filer_entry src[0], s32 size)
1279{
1280 while (size > 0) {
1281 size--;
1282 dst[size].ctrl = src[size].ctrl;
1283 dst[size].prop = src[size].prop;
1284 }
1285}
1286
1287/* Delete the contents of the filer-table between start and end
1288 * and collapse them
1289 */
1290static int gfar_trim_filer_entries(u32 begin, u32 end, struct filer_table *tab)
1291{
1292 int length;
1293
1294 if (end > MAX_FILER_CACHE_IDX || end < begin)
1295 return -EINVAL;
1296
1297 end++;
1298 length = end - begin;
1299
1300 /* Copy */
1301 while (end < tab->index) {
1302 tab->fe[begin].ctrl = tab->fe[end].ctrl;
1303 tab->fe[begin++].prop = tab->fe[end++].prop;
1304
1305 }
1306 /* Fill up with don't cares */
1307 while (begin < tab->index) {
1308 tab->fe[begin].ctrl = 0x60;
1309 tab->fe[begin].prop = 0xFFFFFFFF;
1310 begin++;
1311 }
1312
1313 tab->index -= length;
1314 return 0;
1315}
1316
1317/* Make space on the wanted location */
1318static int gfar_expand_filer_entries(u32 begin, u32 length,
1319 struct filer_table *tab)
1320{
1321 if (length == 0 || length + tab->index > MAX_FILER_CACHE_IDX ||
1322 begin > MAX_FILER_CACHE_IDX)
1323 return -EINVAL;
1324
1325 gfar_copy_filer_entries(&(tab->fe[begin + length]), &(tab->fe[begin]),
1326 tab->index - length + 1);
1327
1328 tab->index += length;
1329 return 0;
1330}
1331
1332static int gfar_get_next_cluster_start(int start, struct filer_table *tab)
1333{
1334 for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
1335 start++) {
1336 if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
1337 (RQFCR_AND | RQFCR_CLE))
1338 return start;
1339 }
1340 return -1;
1341}
1342
1343static int gfar_get_next_cluster_end(int start, struct filer_table *tab)
1344{
1345 for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
1346 start++) {
1347 if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
1348 (RQFCR_CLE))
1349 return start;
1350 }
1351 return -1;
1352}
1353
1354/* Uses hardwares clustering option to reduce
1355 * the number of filer table entries
1356 */
1357static void gfar_cluster_filer(struct filer_table *tab)
1358{
1359 s32 i = -1, j, iend, jend;
1360
1361 while ((i = gfar_get_next_cluster_start(++i, tab)) != -1) {
1362 j = i;
1363 while ((j = gfar_get_next_cluster_start(++j, tab)) != -1) {
1364 /* The cluster entries self and the previous one
1365 * (a mask) must be identical!
1366 */
1367 if (tab->fe[i].ctrl != tab->fe[j].ctrl)
1368 break;
1369 if (tab->fe[i].prop != tab->fe[j].prop)
1370 break;
1371 if (tab->fe[i - 1].ctrl != tab->fe[j - 1].ctrl)
1372 break;
1373 if (tab->fe[i - 1].prop != tab->fe[j - 1].prop)
1374 break;
1375 iend = gfar_get_next_cluster_end(i, tab);
1376 jend = gfar_get_next_cluster_end(j, tab);
1377 if (jend == -1 || iend == -1)
1378 break;
1379
1380 /* First we make some free space, where our cluster
1381 * element should be. Then we copy it there and finally
1382 * delete in from its old location.
1383 */
1384 if (gfar_expand_filer_entries(iend, (jend - j), tab) ==
1385 -EINVAL)
1386 break;
1387
1388 gfar_copy_filer_entries(&(tab->fe[iend + 1]),
1389 &(tab->fe[jend + 1]), jend - j);
1390
1391 if (gfar_trim_filer_entries(jend - 1,
1392 jend + (jend - j),
1393 tab) == -EINVAL)
1394 return;
1395
1396 /* Mask out cluster bit */
1397 tab->fe[iend].ctrl &= ~(RQFCR_CLE);
1398 }
1399 }
1400}
1401
1402/* Swaps the masked bits of a1<>a2 and b1<>b2 */
1403static void gfar_swap_bits(struct gfar_filer_entry *a1,
1404 struct gfar_filer_entry *a2,
1405 struct gfar_filer_entry *b1,
1406 struct gfar_filer_entry *b2, u32 mask)
1407{
1408 u32 temp[4];
1409 temp[0] = a1->ctrl & mask;
1410 temp[1] = a2->ctrl & mask;
1411 temp[2] = b1->ctrl & mask;
1412 temp[3] = b2->ctrl & mask;
1413
1414 a1->ctrl &= ~mask;
1415 a2->ctrl &= ~mask;
1416 b1->ctrl &= ~mask;
1417 b2->ctrl &= ~mask;
1418
1419 a1->ctrl |= temp[1];
1420 a2->ctrl |= temp[0];
1421 b1->ctrl |= temp[3];
1422 b2->ctrl |= temp[2];
1423}
1424
1425/* Generate a list consisting of masks values with their start and
1426 * end of validity and block as indicator for parts belonging
1427 * together (glued by ANDs) in mask_table
1428 */
1429static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table,
1430 struct filer_table *tab)
1431{
1432 u32 i, and_index = 0, block_index = 1;
1433
1434 for (i = 0; i < tab->index; i++) {
1435
1436 /* LSByte of control = 0 sets a mask */
1437 if (!(tab->fe[i].ctrl & 0xF)) {
1438 mask_table[and_index].mask = tab->fe[i].prop;
1439 mask_table[and_index].start = i;
1440 mask_table[and_index].block = block_index;
1441 if (and_index >= 1)
1442 mask_table[and_index - 1].end = i - 1;
1443 and_index++;
1444 }
1445 /* cluster starts and ends will be separated because they should
1446 * hold their position
1447 */
1448 if (tab->fe[i].ctrl & RQFCR_CLE)
1449 block_index++;
1450 /* A not set AND indicates the end of a depended block */
1451 if (!(tab->fe[i].ctrl & RQFCR_AND))
1452 block_index++;
1453 }
1454
1455 mask_table[and_index - 1].end = i - 1;
1456
1457 return and_index;
1458}
1459
1460/* Sorts the entries of mask_table by the values of the masks.
1461 * Important: The 0xFF80 flags of the first and last entry of a
1462 * block must hold their position (which queue, CLusterEnable, ReJEct,
1463 * AND)
1464 */
1465static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table,
1466 struct filer_table *temp_table, u32 and_index)
1467{
1468 /* Pointer to compare function (_asc or _desc) */
1469 int (*gfar_comp)(const void *, const void *);
1470
1471 u32 i, size = 0, start = 0, prev = 1;
1472 u32 old_first, old_last, new_first, new_last;
1473
1474 gfar_comp = &gfar_comp_desc;
1475
1476 for (i = 0; i < and_index; i++) {
1477 if (prev != mask_table[i].block) {
1478 old_first = mask_table[start].start + 1;
1479 old_last = mask_table[i - 1].end;
1480 sort(mask_table + start, size,
1481 sizeof(struct gfar_mask_entry),
1482 gfar_comp, &gfar_swap);
1483
1484 /* Toggle order for every block. This makes the
1485 * thing more efficient!
1486 */
1487 if (gfar_comp == gfar_comp_desc)
1488 gfar_comp = &gfar_comp_asc;
1489 else
1490 gfar_comp = &gfar_comp_desc;
1491
1492 new_first = mask_table[start].start + 1;
1493 new_last = mask_table[i - 1].end;
1494
1495 gfar_swap_bits(&temp_table->fe[new_first],
1496 &temp_table->fe[old_first],
1497 &temp_table->fe[new_last],
1498 &temp_table->fe[old_last],
1499 RQFCR_QUEUE | RQFCR_CLE |
1500 RQFCR_RJE | RQFCR_AND);
1501
1502 start = i;
1503 size = 0;
1504 }
1505 size++;
1506 prev = mask_table[i].block;
1507 }
1508}
1509
1510/* Reduces the number of masks needed in the filer table to save entries
1511 * This is done by sorting the masks of a depended block. A depended block is
1512 * identified by gluing ANDs or CLE. The sorting order toggles after every
1513 * block. Of course entries in scope of a mask must change their location with
1514 * it.
1515 */
1516static int gfar_optimize_filer_masks(struct filer_table *tab)
1517{
1518 struct filer_table *temp_table;
1519 struct gfar_mask_entry *mask_table;
1520
1521 u32 and_index = 0, previous_mask = 0, i = 0, j = 0, size = 0;
1522 s32 ret = 0;
1523
1524 /* We need a copy of the filer table because
1525 * we want to change its order
1526 */
1527 temp_table = kmemdup(tab, sizeof(*temp_table), GFP_KERNEL);
1528 if (temp_table == NULL)
1529 return -ENOMEM;
1530
1531 mask_table = kcalloc(MAX_FILER_CACHE_IDX / 2 + 1,
1532 sizeof(struct gfar_mask_entry), GFP_KERNEL);
1533
1534 if (mask_table == NULL) {
1535 ret = -ENOMEM;
1536 goto end;
1537 }
1538
1539 and_index = gfar_generate_mask_table(mask_table, tab);
1540
1541 gfar_sort_mask_table(mask_table, temp_table, and_index);
1542
1543 /* Now we can copy the data from our duplicated filer table to
1544 * the real one in the order the mask table says
1545 */
1546 for (i = 0; i < and_index; i++) {
1547 size = mask_table[i].end - mask_table[i].start + 1;
1548 gfar_copy_filer_entries(&(tab->fe[j]),
1549 &(temp_table->fe[mask_table[i].start]), size);
1550 j += size;
1551 }
1552
1553 /* And finally we just have to check for duplicated masks and drop the
1554 * second ones
1555 */
1556 for (i = 0; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
1557 if (tab->fe[i].ctrl == 0x80) {
1558 previous_mask = i++;
1559 break;
1560 }
1561 }
1562 for (; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
1563 if (tab->fe[i].ctrl == 0x80) {
1564 if (tab->fe[i].prop == tab->fe[previous_mask].prop) {
1565 /* Two identical ones found!
1566 * So drop the second one!
1567 */
1568 gfar_trim_filer_entries(i, i, tab);
1569 } else
1570 /* Not identical! */
1571 previous_mask = i;
1572 }
1573 }
1574
1575 kfree(mask_table);
1576end: kfree(temp_table);
1577 return ret;
1578}
1579
1580/* Write the bit-pattern from software's buffer to hardware registers */ 1252/* Write the bit-pattern from software's buffer to hardware registers */
1581static int gfar_write_filer_table(struct gfar_private *priv, 1253static int gfar_write_filer_table(struct gfar_private *priv,
1582 struct filer_table *tab) 1254 struct filer_table *tab)
@@ -1586,11 +1258,10 @@ static int gfar_write_filer_table(struct gfar_private *priv,
1586 return -EBUSY; 1258 return -EBUSY;
1587 1259
1588 /* Fill regular entries */ 1260 /* Fill regular entries */
1589 for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].prop); 1261 for (; i < MAX_FILER_IDX && (tab->fe[i].ctrl | tab->fe[i].prop); i++)
1590 i++)
1591 gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop); 1262 gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop);
1592 /* Fill the rest with fall-troughs */ 1263 /* Fill the rest with fall-troughs */
1593 for (; i < MAX_FILER_IDX - 1; i++) 1264 for (; i < MAX_FILER_IDX; i++)
1594 gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF); 1265 gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF);
1595 /* Last entry must be default accept 1266 /* Last entry must be default accept
1596 * because that's what people expect 1267 * because that's what people expect
@@ -1624,7 +1295,6 @@ static int gfar_process_filer_changes(struct gfar_private *priv)
1624{ 1295{
1625 struct ethtool_flow_spec_container *j; 1296 struct ethtool_flow_spec_container *j;
1626 struct filer_table *tab; 1297 struct filer_table *tab;
1627 s32 i = 0;
1628 s32 ret = 0; 1298 s32 ret = 0;
1629 1299
1630 /* So index is set to zero, too! */ 1300 /* So index is set to zero, too! */
@@ -1649,17 +1319,6 @@ static int gfar_process_filer_changes(struct gfar_private *priv)
1649 } 1319 }
1650 } 1320 }
1651 1321
1652 i = tab->index;
1653
1654 /* Optimizations to save entries */
1655 gfar_cluster_filer(tab);
1656 gfar_optimize_filer_masks(tab);
1657
1658 pr_debug("\tSummary:\n"
1659 "\tData on hardware: %d\n"
1660 "\tCompression rate: %d%%\n",
1661 tab->index, 100 - (100 * tab->index) / i);
1662
1663 /* Write everything to hardware */ 1322 /* Write everything to hardware */
1664 ret = gfar_write_filer_table(priv, tab); 1323 ret = gfar_write_filer_table(priv, tab);
1665 if (ret == -EBUSY) { 1324 if (ret == -EBUSY) {
@@ -1725,13 +1384,14 @@ static int gfar_add_cls(struct gfar_private *priv,
1725 } 1384 }
1726 1385
1727process: 1386process:
1387 priv->rx_list.count++;
1728 ret = gfar_process_filer_changes(priv); 1388 ret = gfar_process_filer_changes(priv);
1729 if (ret) 1389 if (ret)
1730 goto clean_list; 1390 goto clean_list;
1731 priv->rx_list.count++;
1732 return ret; 1391 return ret;
1733 1392
1734clean_list: 1393clean_list:
1394 priv->rx_list.count--;
1735 list_del(&temp->list); 1395 list_del(&temp->list);
1736clean_mem: 1396clean_mem:
1737 kfree(temp); 1397 kfree(temp);
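
With the bflock gone, the ethtool WoL handler just records the wakeup state computed by the device core. The sketch below shows the common pattern; my_priv is a placeholder and, mirroring this driver, only WAKE_MAGIC is assumed to be supported:

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/pm_wakeup.h>

struct my_priv {
	bool wol_en;
	bool has_magic_packet;	/* capability discovered at probe time */
};

static int my_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
	struct my_priv *priv = netdev_priv(ndev);

	/* Reject anything the hardware cannot do. */
	if (wol->wolopts & ~WAKE_MAGIC)
		return -EOPNOTSUPP;
	if ((wol->wolopts & WAKE_MAGIC) && !priv->has_magic_packet)
		return -EOPNOTSUPP;

	/* Let the PM core track whether wakeup is armed ... */
	device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC);

	/* ... and cache its answer for the suspend path. A plain bool
	 * write needs no spinlock here. */
	priv->wol_en = !!device_may_wakeup(&ndev->dev);

	return 0;
}
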
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 982fdcdc795b..b5b2925103ec 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -216,7 +216,7 @@ static void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring,
216 216
217static inline bool fm10k_page_is_reserved(struct page *page) 217static inline bool fm10k_page_is_reserved(struct page *page)
218{ 218{
219 return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc; 219 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
220} 220}
221 221
222static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer, 222static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,
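
The same one-line substitution appears in fm10k, igb, ixgbe and ixgbevf below: the drivers stop reading page->pfmemalloc directly and use the page_is_pfmemalloc() accessor instead. The intent of the check itself is unchanged, roughly:

#include <linux/mm.h>
#include <linux/topology.h>

/* An RX page is only worth recycling if it is local to this NUMA node
 * and was not handed out from the emergency (pfmemalloc) reserves;
 * reserve pages should go back to the allocator rather than be reused
 * for ordinary receive traffic. */
static inline bool rx_page_is_reusable(struct page *page)
{
	return page_to_nid(page) == numa_mem_id() && !page_is_pfmemalloc(page);
}
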
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 2f70a9b152bd..830466c49987 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -6566,7 +6566,7 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
6566 6566
6567static inline bool igb_page_is_reserved(struct page *page) 6567static inline bool igb_page_is_reserved(struct page *page)
6568{ 6568{
6569 return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc; 6569 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
6570} 6570}
6571 6571
6572static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer, 6572static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 9aa6104e34ea..ae21e0b06c3a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1832,7 +1832,7 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
1832 1832
1833static inline bool ixgbe_page_is_reserved(struct page *page) 1833static inline bool ixgbe_page_is_reserved(struct page *page)
1834{ 1834{
1835 return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc; 1835 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
1836} 1836}
1837 1837
1838/** 1838/**
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index e71cdde9cb01..1d7b00b038a2 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -765,7 +765,7 @@ static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
765 765
766static inline bool ixgbevf_page_is_reserved(struct page *page) 766static inline bool ixgbevf_page_is_reserved(struct page *page)
767{ 767{
768 return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc; 768 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
769} 769}
770 770
771/** 771/**
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 370e20ed224c..62e48bc0cb23 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1462,7 +1462,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
1462 struct mvneta_rx_queue *rxq) 1462 struct mvneta_rx_queue *rxq)
1463{ 1463{
1464 struct net_device *dev = pp->dev; 1464 struct net_device *dev = pp->dev;
1465 int rx_done, rx_filled; 1465 int rx_done;
1466 u32 rcvd_pkts = 0; 1466 u32 rcvd_pkts = 0;
1467 u32 rcvd_bytes = 0; 1467 u32 rcvd_bytes = 0;
1468 1468
@@ -1473,7 +1473,6 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
1473 rx_todo = rx_done; 1473 rx_todo = rx_done;
1474 1474
1475 rx_done = 0; 1475 rx_done = 0;
1476 rx_filled = 0;
1477 1476
1478 /* Fairness NAPI loop */ 1477 /* Fairness NAPI loop */
1479 while (rx_done < rx_todo) { 1478 while (rx_done < rx_todo) {
@@ -1484,7 +1483,6 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
1484 int rx_bytes, err; 1483 int rx_bytes, err;
1485 1484
1486 rx_done++; 1485 rx_done++;
1487 rx_filled++;
1488 rx_status = rx_desc->status; 1486 rx_status = rx_desc->status;
1489 rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE); 1487 rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
1490 data = (unsigned char *)rx_desc->buf_cookie; 1488 data = (unsigned char *)rx_desc->buf_cookie;
@@ -1524,6 +1522,14 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
1524 continue; 1522 continue;
1525 } 1523 }
1526 1524
1525 /* Refill processing */
1526 err = mvneta_rx_refill(pp, rx_desc);
1527 if (err) {
1528 netdev_err(dev, "Linux processing - Can't refill\n");
1529 rxq->missed++;
1530 goto err_drop_frame;
1531 }
1532
1527 skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size); 1533 skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size);
1528 if (!skb) 1534 if (!skb)
1529 goto err_drop_frame; 1535 goto err_drop_frame;
@@ -1543,14 +1549,6 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
1543 mvneta_rx_csum(pp, rx_status, skb); 1549 mvneta_rx_csum(pp, rx_status, skb);
1544 1550
1545 napi_gro_receive(&pp->napi, skb); 1551 napi_gro_receive(&pp->napi, skb);
1546
1547 /* Refill processing */
1548 err = mvneta_rx_refill(pp, rx_desc);
1549 if (err) {
1550 netdev_err(dev, "Linux processing - Can't refill\n");
1551 rxq->missed++;
1552 rx_filled--;
1553 }
1554 } 1552 }
1555 1553
1556 if (rcvd_pkts) { 1554 if (rcvd_pkts) {
@@ -1563,7 +1561,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
1563 } 1561 }
1564 1562
1565 /* Update rxq management counters */ 1563 /* Update rxq management counters */
1566 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_filled); 1564 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
1567 1565
1568 return rx_done; 1566 return rx_done;
1569} 1567}
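
The mvneta change moves the refill in front of build_skb(): the ring slot is re-armed first, and only if that succeeds is the old buffer turned into an skb, so a refill failure now drops one frame instead of leaving a descriptor without a buffer (the old code only noticed the failure after the packet had already been passed up). A tiny self-contained model of that invariant, plain C rather than driver code:

#include <stdbool.h>
#include <stdlib.h>

/* Toy model: every ring slot must always own a buffer. The old buffer
 * may only leave the ring after a replacement is attached, so an
 * allocation failure costs one frame, never one ring slot. deliver()
 * stands in for build_skb() + napi_gro_receive() and is assumed to
 * free the buffer it is given. */
struct slot { void *buf; };

static bool process_slot(struct slot *s, void (*deliver)(void *buf))
{
	void *old = s->buf;
	void *fresh = malloc(2048);	/* stands in for the DMA refill */

	if (!fresh)
		return false;		/* drop: the slot still owns 'old' */

	s->buf = fresh;			/* re-arm the slot first */
	deliver(old);			/* now the old buffer can leave the ring */
	return true;
}
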
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 3e8b1bfb1f2e..d9884fd15b45 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -27,6 +27,8 @@
27#include <linux/of_address.h> 27#include <linux/of_address.h>
28#include <linux/phy.h> 28#include <linux/phy.h>
29#include <linux/clk.h> 29#include <linux/clk.h>
30#include <linux/hrtimer.h>
31#include <linux/ktime.h>
30#include <uapi/linux/ppp_defs.h> 32#include <uapi/linux/ppp_defs.h>
31#include <net/ip.h> 33#include <net/ip.h>
32#include <net/ipv6.h> 34#include <net/ipv6.h>
@@ -299,6 +301,7 @@
299 301
300/* Coalescing */ 302/* Coalescing */
301#define MVPP2_TXDONE_COAL_PKTS_THRESH 15 303#define MVPP2_TXDONE_COAL_PKTS_THRESH 15
304#define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL
302#define MVPP2_RX_COAL_PKTS 32 305#define MVPP2_RX_COAL_PKTS 32
303#define MVPP2_RX_COAL_USEC 100 306#define MVPP2_RX_COAL_USEC 100
304 307
@@ -660,6 +663,14 @@ struct mvpp2_pcpu_stats {
660 u64 tx_bytes; 663 u64 tx_bytes;
661}; 664};
662 665
666/* Per-CPU port control */
667struct mvpp2_port_pcpu {
668 struct hrtimer tx_done_timer;
669 bool timer_scheduled;
670 /* Tasklet for egress finalization */
671 struct tasklet_struct tx_done_tasklet;
672};
673
663struct mvpp2_port { 674struct mvpp2_port {
664 u8 id; 675 u8 id;
665 676
@@ -679,6 +690,9 @@ struct mvpp2_port {
679 u32 pending_cause_rx; 690 u32 pending_cause_rx;
680 struct napi_struct napi; 691 struct napi_struct napi;
681 692
693 /* Per-CPU port control */
694 struct mvpp2_port_pcpu __percpu *pcpu;
695
682 /* Flags */ 696 /* Flags */
683 unsigned long flags; 697 unsigned long flags;
684 698
@@ -776,6 +790,9 @@ struct mvpp2_txq_pcpu {
776 /* Array of transmitted skb */ 790 /* Array of transmitted skb */
777 struct sk_buff **tx_skb; 791 struct sk_buff **tx_skb;
778 792
793 /* Array of transmitted buffers' physical addresses */
794 dma_addr_t *tx_buffs;
795
779 /* Index of last TX DMA descriptor that was inserted */ 796 /* Index of last TX DMA descriptor that was inserted */
780 int txq_put_index; 797 int txq_put_index;
781 798
@@ -913,8 +930,6 @@ struct mvpp2_bm_pool {
913 /* Occupied buffers indicator */ 930 /* Occupied buffers indicator */
914 atomic_t in_use; 931 atomic_t in_use;
915 int in_use_thresh; 932 int in_use_thresh;
916
917 spinlock_t lock;
918}; 933};
919 934
920struct mvpp2_buff_hdr { 935struct mvpp2_buff_hdr {
@@ -963,9 +978,13 @@ static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
963} 978}
964 979
965static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu, 980static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu,
966 struct sk_buff *skb) 981 struct sk_buff *skb,
982 struct mvpp2_tx_desc *tx_desc)
967{ 983{
968 txq_pcpu->tx_skb[txq_pcpu->txq_put_index] = skb; 984 txq_pcpu->tx_skb[txq_pcpu->txq_put_index] = skb;
985 if (skb)
986 txq_pcpu->tx_buffs[txq_pcpu->txq_put_index] =
987 tx_desc->buf_phys_addr;
969 txq_pcpu->txq_put_index++; 988 txq_pcpu->txq_put_index++;
970 if (txq_pcpu->txq_put_index == txq_pcpu->size) 989 if (txq_pcpu->txq_put_index == txq_pcpu->size)
971 txq_pcpu->txq_put_index = 0; 990 txq_pcpu->txq_put_index = 0;
@@ -3376,7 +3395,6 @@ static int mvpp2_bm_pool_create(struct platform_device *pdev,
3376 bm_pool->pkt_size = 0; 3395 bm_pool->pkt_size = 0;
3377 bm_pool->buf_num = 0; 3396 bm_pool->buf_num = 0;
3378 atomic_set(&bm_pool->in_use, 0); 3397 atomic_set(&bm_pool->in_use, 0);
3379 spin_lock_init(&bm_pool->lock);
3380 3398
3381 return 0; 3399 return 0;
3382} 3400}
@@ -3647,7 +3665,6 @@ static struct mvpp2_bm_pool *
3647mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type, 3665mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
3648 int pkt_size) 3666 int pkt_size)
3649{ 3667{
3650 unsigned long flags = 0;
3651 struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool]; 3668 struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
3652 int num; 3669 int num;
3653 3670
@@ -3656,8 +3673,6 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
3656 return NULL; 3673 return NULL;
3657 } 3674 }
3658 3675
3659 spin_lock_irqsave(&new_pool->lock, flags);
3660
3661 if (new_pool->type == MVPP2_BM_FREE) 3676 if (new_pool->type == MVPP2_BM_FREE)
3662 new_pool->type = type; 3677 new_pool->type = type;
3663 3678
@@ -3686,8 +3701,6 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
3686 if (num != pkts_num) { 3701 if (num != pkts_num) {
3687 WARN(1, "pool %d: %d of %d allocated\n", 3702 WARN(1, "pool %d: %d of %d allocated\n",
3688 new_pool->id, num, pkts_num); 3703 new_pool->id, num, pkts_num);
3689 /* We need to undo the bufs_add() allocations */
3690 spin_unlock_irqrestore(&new_pool->lock, flags);
3691 return NULL; 3704 return NULL;
3692 } 3705 }
3693 } 3706 }
@@ -3695,15 +3708,12 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
3695 mvpp2_bm_pool_bufsize_set(port->priv, new_pool, 3708 mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
3696 MVPP2_RX_BUF_SIZE(new_pool->pkt_size)); 3709 MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
3697 3710
3698 spin_unlock_irqrestore(&new_pool->lock, flags);
3699
3700 return new_pool; 3711 return new_pool;
3701} 3712}
3702 3713
3703/* Initialize pools for swf */ 3714/* Initialize pools for swf */
3704static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port) 3715static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
3705{ 3716{
3706 unsigned long flags = 0;
3707 int rxq; 3717 int rxq;
3708 3718
3709 if (!port->pool_long) { 3719 if (!port->pool_long) {
@@ -3714,9 +3724,7 @@ static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
3714 if (!port->pool_long) 3724 if (!port->pool_long)
3715 return -ENOMEM; 3725 return -ENOMEM;
3716 3726
3717 spin_lock_irqsave(&port->pool_long->lock, flags);
3718 port->pool_long->port_map |= (1 << port->id); 3727 port->pool_long->port_map |= (1 << port->id);
3719 spin_unlock_irqrestore(&port->pool_long->lock, flags);
3720 3728
3721 for (rxq = 0; rxq < rxq_number; rxq++) 3729 for (rxq = 0; rxq < rxq_number; rxq++)
3722 mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id); 3730 mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
@@ -3730,9 +3738,7 @@ static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
3730 if (!port->pool_short) 3738 if (!port->pool_short)
3731 return -ENOMEM; 3739 return -ENOMEM;
3732 3740
3733 spin_lock_irqsave(&port->pool_short->lock, flags);
3734 port->pool_short->port_map |= (1 << port->id); 3741 port->pool_short->port_map |= (1 << port->id);
3735 spin_unlock_irqrestore(&port->pool_short->lock, flags);
3736 3742
3737 for (rxq = 0; rxq < rxq_number; rxq++) 3743 for (rxq = 0; rxq < rxq_number; rxq++)
3738 mvpp2_rxq_short_pool_set(port, rxq, 3744 mvpp2_rxq_short_pool_set(port, rxq,
@@ -3806,7 +3812,6 @@ static void mvpp2_interrupts_unmask(void *arg)
3806 3812
3807 mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id), 3813 mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id),
3808 (MVPP2_CAUSE_MISC_SUM_MASK | 3814 (MVPP2_CAUSE_MISC_SUM_MASK |
3809 MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK |
3810 MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK)); 3815 MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK));
3811} 3816}
3812 3817
@@ -4382,23 +4387,6 @@ static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
4382 rxq->time_coal = usec; 4387 rxq->time_coal = usec;
4383} 4388}
4384 4389
4385/* Set threshold for TX_DONE pkts coalescing */
4386static void mvpp2_tx_done_pkts_coal_set(void *arg)
4387{
4388 struct mvpp2_port *port = arg;
4389 int queue;
4390 u32 val;
4391
4392 for (queue = 0; queue < txq_number; queue++) {
4393 struct mvpp2_tx_queue *txq = port->txqs[queue];
4394
4395 val = (txq->done_pkts_coal << MVPP2_TRANSMITTED_THRESH_OFFSET) &
4396 MVPP2_TRANSMITTED_THRESH_MASK;
4397 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
4398 mvpp2_write(port->priv, MVPP2_TXQ_THRESH_REG, val);
4399 }
4400}
4401
4402/* Free Tx queue skbuffs */ 4390/* Free Tx queue skbuffs */
4403static void mvpp2_txq_bufs_free(struct mvpp2_port *port, 4391static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
4404 struct mvpp2_tx_queue *txq, 4392 struct mvpp2_tx_queue *txq,
@@ -4407,8 +4395,8 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
4407 int i; 4395 int i;
4408 4396
4409 for (i = 0; i < num; i++) { 4397 for (i = 0; i < num; i++) {
4410 struct mvpp2_tx_desc *tx_desc = txq->descs + 4398 dma_addr_t buf_phys_addr =
4411 txq_pcpu->txq_get_index; 4399 txq_pcpu->tx_buffs[txq_pcpu->txq_get_index];
4412 struct sk_buff *skb = txq_pcpu->tx_skb[txq_pcpu->txq_get_index]; 4400 struct sk_buff *skb = txq_pcpu->tx_skb[txq_pcpu->txq_get_index];
4413 4401
4414 mvpp2_txq_inc_get(txq_pcpu); 4402 mvpp2_txq_inc_get(txq_pcpu);
@@ -4416,8 +4404,8 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
4416 if (!skb) 4404 if (!skb)
4417 continue; 4405 continue;
4418 4406
4419 dma_unmap_single(port->dev->dev.parent, tx_desc->buf_phys_addr, 4407 dma_unmap_single(port->dev->dev.parent, buf_phys_addr,
4420 tx_desc->data_size, DMA_TO_DEVICE); 4408 skb_headlen(skb), DMA_TO_DEVICE);
4421 dev_kfree_skb_any(skb); 4409 dev_kfree_skb_any(skb);
4422 } 4410 }
4423} 4411}
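
Storing the DMA address in the new per-CPU tx_buffs[] array (filled in mvpp2_txq_inc_put() above) means the completion path no longer reads buf_phys_addr and data_size back from a descriptor the hardware has already released; the unmap uses the saved address and skb_headlen() instead. A minimal sketch of that bookkeeping, with invented my_* names:

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct my_txq_pcpu {
	struct sk_buff **tx_skb;	/* skb per descriptor slot */
	dma_addr_t *tx_buffs;		/* DMA address saved at enqueue time */
	int put_index, get_index, size;
};

/* Enqueue side: remember everything the completion side will need. */
static void my_txq_put(struct my_txq_pcpu *q, struct sk_buff *skb, dma_addr_t dma)
{
	q->tx_skb[q->put_index] = skb;
	if (skb)
		q->tx_buffs[q->put_index] = dma;
	if (++q->put_index == q->size)
		q->put_index = 0;
}

/* Completion side: unmap from the saved copy, not from the descriptor. */
static void my_txq_complete_one(struct device *dev, struct my_txq_pcpu *q)
{
	struct sk_buff *skb = q->tx_skb[q->get_index];
	dma_addr_t dma = q->tx_buffs[q->get_index];

	if (++q->get_index == q->size)
		q->get_index = 0;
	if (!skb)
		return;		/* fragment slot, nothing to free here */

	dma_unmap_single(dev, dma, skb_headlen(skb), DMA_TO_DEVICE);
	dev_kfree_skb_any(skb);
}
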
@@ -4433,7 +4421,7 @@ static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
4433static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port, 4421static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
4434 u32 cause) 4422 u32 cause)
4435{ 4423{
4436 int queue = fls(cause >> 16) - 1; 4424 int queue = fls(cause) - 1;
4437 4425
4438 return port->txqs[queue]; 4426 return port->txqs[queue];
4439} 4427}
@@ -4460,6 +4448,29 @@ static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
4460 netif_tx_wake_queue(nq); 4448 netif_tx_wake_queue(nq);
4461} 4449}
4462 4450
4451static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause)
4452{
4453 struct mvpp2_tx_queue *txq;
4454 struct mvpp2_txq_pcpu *txq_pcpu;
4455 unsigned int tx_todo = 0;
4456
4457 while (cause) {
4458 txq = mvpp2_get_tx_queue(port, cause);
4459 if (!txq)
4460 break;
4461
4462 txq_pcpu = this_cpu_ptr(txq->pcpu);
4463
4464 if (txq_pcpu->count) {
4465 mvpp2_txq_done(port, txq, txq_pcpu);
4466 tx_todo += txq_pcpu->count;
4467 }
4468
4469 cause &= ~(1 << txq->log_id);
4470 }
4471 return tx_todo;
4472}
4473
4463/* Rx/Tx queue initialization/cleanup methods */ 4474/* Rx/Tx queue initialization/cleanup methods */
4464 4475
4465/* Allocate and initialize descriptors for aggr TXQ */ 4476/* Allocate and initialize descriptors for aggr TXQ */
@@ -4649,12 +4660,13 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
4649 txq_pcpu->tx_skb = kmalloc(txq_pcpu->size * 4660 txq_pcpu->tx_skb = kmalloc(txq_pcpu->size *
4650 sizeof(*txq_pcpu->tx_skb), 4661 sizeof(*txq_pcpu->tx_skb),
4651 GFP_KERNEL); 4662 GFP_KERNEL);
4652 if (!txq_pcpu->tx_skb) { 4663 if (!txq_pcpu->tx_skb)
4653 dma_free_coherent(port->dev->dev.parent, 4664 goto error;
4654 txq->size * MVPP2_DESC_ALIGNED_SIZE, 4665
4655 txq->descs, txq->descs_phys); 4666 txq_pcpu->tx_buffs = kmalloc(txq_pcpu->size *
4656 return -ENOMEM; 4667 sizeof(dma_addr_t), GFP_KERNEL);
4657 } 4668 if (!txq_pcpu->tx_buffs)
4669 goto error;
4658 4670
4659 txq_pcpu->count = 0; 4671 txq_pcpu->count = 0;
4660 txq_pcpu->reserved_num = 0; 4672 txq_pcpu->reserved_num = 0;
@@ -4663,6 +4675,19 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
4663 } 4675 }
4664 4676
4665 return 0; 4677 return 0;
4678
4679error:
4680 for_each_present_cpu(cpu) {
4681 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
4682 kfree(txq_pcpu->tx_skb);
4683 kfree(txq_pcpu->tx_buffs);
4684 }
4685
4686 dma_free_coherent(port->dev->dev.parent,
4687 txq->size * MVPP2_DESC_ALIGNED_SIZE,
4688 txq->descs, txq->descs_phys);
4689
4690 return -ENOMEM;
4666} 4691}
4667 4692
4668/* Free allocated TXQ resources */ 4693/* Free allocated TXQ resources */
@@ -4675,6 +4700,7 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port,
4675 for_each_present_cpu(cpu) { 4700 for_each_present_cpu(cpu) {
4676 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); 4701 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
4677 kfree(txq_pcpu->tx_skb); 4702 kfree(txq_pcpu->tx_skb);
4703 kfree(txq_pcpu->tx_buffs);
4678 } 4704 }
4679 4705
4680 if (txq->descs) 4706 if (txq->descs)
@@ -4805,7 +4831,6 @@ static int mvpp2_setup_txqs(struct mvpp2_port *port)
4805 goto err_cleanup; 4831 goto err_cleanup;
4806 } 4832 }
4807 4833
4808 on_each_cpu(mvpp2_tx_done_pkts_coal_set, port, 1);
4809 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1); 4834 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
4810 return 0; 4835 return 0;
4811 4836
@@ -4887,6 +4912,49 @@ static void mvpp2_link_event(struct net_device *dev)
4887 } 4912 }
4888} 4913}
4889 4914
4915static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
4916{
4917 ktime_t interval;
4918
4919 if (!port_pcpu->timer_scheduled) {
4920 port_pcpu->timer_scheduled = true;
4921 interval = ktime_set(0, MVPP2_TXDONE_HRTIMER_PERIOD_NS);
4922 hrtimer_start(&port_pcpu->tx_done_timer, interval,
4923 HRTIMER_MODE_REL_PINNED);
4924 }
4925}
4926
4927static void mvpp2_tx_proc_cb(unsigned long data)
4928{
4929 struct net_device *dev = (struct net_device *)data;
4930 struct mvpp2_port *port = netdev_priv(dev);
4931 struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
4932 unsigned int tx_todo, cause;
4933
4934 if (!netif_running(dev))
4935 return;
4936 port_pcpu->timer_scheduled = false;
4937
4938 /* Process all the Tx queues */
4939 cause = (1 << txq_number) - 1;
4940 tx_todo = mvpp2_tx_done(port, cause);
4941
4942 /* Set the timer in case not all the packets were processed */
4943 if (tx_todo)
4944 mvpp2_timer_set(port_pcpu);
4945}
4946
4947static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
4948{
4949 struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
4950 struct mvpp2_port_pcpu,
4951 tx_done_timer);
4952
4953 tasklet_schedule(&port_pcpu->tx_done_tasklet);
4954
4955 return HRTIMER_NORESTART;
4956}
4957
4890/* Main RX/TX processing routines */ 4958/* Main RX/TX processing routines */
4891 4959
4892/* Display more error info */ 4960/* Display more error info */
@@ -5144,11 +5212,11 @@ static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
5144 if (i == (skb_shinfo(skb)->nr_frags - 1)) { 5212 if (i == (skb_shinfo(skb)->nr_frags - 1)) {
5145 /* Last descriptor */ 5213 /* Last descriptor */
5146 tx_desc->command = MVPP2_TXD_L_DESC; 5214 tx_desc->command = MVPP2_TXD_L_DESC;
5147 mvpp2_txq_inc_put(txq_pcpu, skb); 5215 mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
5148 } else { 5216 } else {
5149 /* Descriptor in the middle: Not First, Not Last */ 5217 /* Descriptor in the middle: Not First, Not Last */
5150 tx_desc->command = 0; 5218 tx_desc->command = 0;
5151 mvpp2_txq_inc_put(txq_pcpu, NULL); 5219 mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);
5152 } 5220 }
5153 } 5221 }
5154 5222
@@ -5214,12 +5282,12 @@ static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
5214 /* First and Last descriptor */ 5282 /* First and Last descriptor */
5215 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC; 5283 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
5216 tx_desc->command = tx_cmd; 5284 tx_desc->command = tx_cmd;
5217 mvpp2_txq_inc_put(txq_pcpu, skb); 5285 mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
5218 } else { 5286 } else {
5219 /* First but not Last */ 5287 /* First but not Last */
5220 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE; 5288 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
5221 tx_desc->command = tx_cmd; 5289 tx_desc->command = tx_cmd;
5222 mvpp2_txq_inc_put(txq_pcpu, NULL); 5290 mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);
5223 5291
5224 /* Continue with other skb fragments */ 5292 /* Continue with other skb fragments */
5225 if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) { 5293 if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
@@ -5255,6 +5323,17 @@ out:
5255 dev_kfree_skb_any(skb); 5323 dev_kfree_skb_any(skb);
5256 } 5324 }
5257 5325
5326 /* Finalize TX processing */
5327 if (txq_pcpu->count >= txq->done_pkts_coal)
5328 mvpp2_txq_done(port, txq, txq_pcpu);
5329
5330 /* Set the timer in case not all frags were processed */
5331 if (txq_pcpu->count <= frags && txq_pcpu->count > 0) {
5332 struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
5333
5334 mvpp2_timer_set(port_pcpu);
5335 }
5336
5258 return NETDEV_TX_OK; 5337 return NETDEV_TX_OK;
5259} 5338}
5260 5339
@@ -5268,10 +5347,11 @@ static inline void mvpp2_cause_error(struct net_device *dev, int cause)
5268 netdev_err(dev, "tx fifo underrun error\n"); 5347 netdev_err(dev, "tx fifo underrun error\n");
5269} 5348}
5270 5349
5271static void mvpp2_txq_done_percpu(void *arg) 5350static int mvpp2_poll(struct napi_struct *napi, int budget)
5272{ 5351{
5273 struct mvpp2_port *port = arg; 5352 u32 cause_rx_tx, cause_rx, cause_misc;
5274 u32 cause_rx_tx, cause_tx, cause_misc; 5353 int rx_done = 0;
5354 struct mvpp2_port *port = netdev_priv(napi->dev);
5275 5355
5276 /* Rx/Tx cause register 5356 /* Rx/Tx cause register
5277 * 5357 *
@@ -5285,7 +5365,7 @@ static void mvpp2_txq_done_percpu(void *arg)
5285 */ 5365 */
5286 cause_rx_tx = mvpp2_read(port->priv, 5366 cause_rx_tx = mvpp2_read(port->priv,
5287 MVPP2_ISR_RX_TX_CAUSE_REG(port->id)); 5367 MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
5288 cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; 5368 cause_rx_tx &= ~MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
5289 cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK; 5369 cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
5290 5370
5291 if (cause_misc) { 5371 if (cause_misc) {
@@ -5297,26 +5377,6 @@ static void mvpp2_txq_done_percpu(void *arg)
5297 cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK); 5377 cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
5298 } 5378 }
5299 5379
5300 /* Release TX descriptors */
5301 if (cause_tx) {
5302 struct mvpp2_tx_queue *txq = mvpp2_get_tx_queue(port, cause_tx);
5303 struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
5304
5305 if (txq_pcpu->count)
5306 mvpp2_txq_done(port, txq, txq_pcpu);
5307 }
5308}
5309
5310static int mvpp2_poll(struct napi_struct *napi, int budget)
5311{
5312 u32 cause_rx_tx, cause_rx;
5313 int rx_done = 0;
5314 struct mvpp2_port *port = netdev_priv(napi->dev);
5315
5316 on_each_cpu(mvpp2_txq_done_percpu, port, 1);
5317
5318 cause_rx_tx = mvpp2_read(port->priv,
5319 MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
5320 cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK; 5380 cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
5321 5381
5322 /* Process RX packets */ 5382 /* Process RX packets */
@@ -5561,6 +5621,8 @@ err_cleanup_rxqs:
5561static int mvpp2_stop(struct net_device *dev) 5621static int mvpp2_stop(struct net_device *dev)
5562{ 5622{
5563 struct mvpp2_port *port = netdev_priv(dev); 5623 struct mvpp2_port *port = netdev_priv(dev);
5624 struct mvpp2_port_pcpu *port_pcpu;
5625 int cpu;
5564 5626
5565 mvpp2_stop_dev(port); 5627 mvpp2_stop_dev(port);
5566 mvpp2_phy_disconnect(port); 5628 mvpp2_phy_disconnect(port);
@@ -5569,6 +5631,13 @@ static int mvpp2_stop(struct net_device *dev)
5569 on_each_cpu(mvpp2_interrupts_mask, port, 1); 5631 on_each_cpu(mvpp2_interrupts_mask, port, 1);
5570 5632
5571 free_irq(port->irq, port); 5633 free_irq(port->irq, port);
5634 for_each_present_cpu(cpu) {
5635 port_pcpu = per_cpu_ptr(port->pcpu, cpu);
5636
5637 hrtimer_cancel(&port_pcpu->tx_done_timer);
5638 port_pcpu->timer_scheduled = false;
5639 tasklet_kill(&port_pcpu->tx_done_tasklet);
5640 }
5572 mvpp2_cleanup_rxqs(port); 5641 mvpp2_cleanup_rxqs(port);
5573 mvpp2_cleanup_txqs(port); 5642 mvpp2_cleanup_txqs(port);
5574 5643
@@ -5784,7 +5853,6 @@ static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
5784 txq->done_pkts_coal = c->tx_max_coalesced_frames; 5853 txq->done_pkts_coal = c->tx_max_coalesced_frames;
5785 } 5854 }
5786 5855
5787 on_each_cpu(mvpp2_tx_done_pkts_coal_set, port, 1);
5788 return 0; 5856 return 0;
5789} 5857}
5790 5858
@@ -6035,6 +6103,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
6035{ 6103{
6036 struct device_node *phy_node; 6104 struct device_node *phy_node;
6037 struct mvpp2_port *port; 6105 struct mvpp2_port *port;
6106 struct mvpp2_port_pcpu *port_pcpu;
6038 struct net_device *dev; 6107 struct net_device *dev;
6039 struct resource *res; 6108 struct resource *res;
6040 const char *dt_mac_addr; 6109 const char *dt_mac_addr;
@@ -6044,7 +6113,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
6044 int features; 6113 int features;
6045 int phy_mode; 6114 int phy_mode;
6046 int priv_common_regs_num = 2; 6115 int priv_common_regs_num = 2;
6047 int err, i; 6116 int err, i, cpu;
6048 6117
6049 dev = alloc_etherdev_mqs(sizeof(struct mvpp2_port), txq_number, 6118 dev = alloc_etherdev_mqs(sizeof(struct mvpp2_port), txq_number,
6050 rxq_number); 6119 rxq_number);
@@ -6135,6 +6204,24 @@ static int mvpp2_port_probe(struct platform_device *pdev,
6135 } 6204 }
6136 mvpp2_port_power_up(port); 6205 mvpp2_port_power_up(port);
6137 6206
6207 port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
6208 if (!port->pcpu) {
6209 err = -ENOMEM;
6210 goto err_free_txq_pcpu;
6211 }
6212
6213 for_each_present_cpu(cpu) {
6214 port_pcpu = per_cpu_ptr(port->pcpu, cpu);
6215
6216 hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
6217 HRTIMER_MODE_REL_PINNED);
6218 port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
6219 port_pcpu->timer_scheduled = false;
6220
6221 tasklet_init(&port_pcpu->tx_done_tasklet, mvpp2_tx_proc_cb,
6222 (unsigned long)dev);
6223 }
6224
6138 netif_napi_add(dev, &port->napi, mvpp2_poll, NAPI_POLL_WEIGHT); 6225 netif_napi_add(dev, &port->napi, mvpp2_poll, NAPI_POLL_WEIGHT);
6139 features = NETIF_F_SG | NETIF_F_IP_CSUM; 6226 features = NETIF_F_SG | NETIF_F_IP_CSUM;
6140 dev->features = features | NETIF_F_RXCSUM; 6227 dev->features = features | NETIF_F_RXCSUM;
@@ -6144,7 +6231,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
6144 err = register_netdev(dev); 6231 err = register_netdev(dev);
6145 if (err < 0) { 6232 if (err < 0) {
6146 dev_err(&pdev->dev, "failed to register netdev\n"); 6233 dev_err(&pdev->dev, "failed to register netdev\n");
6147 goto err_free_txq_pcpu; 6234 goto err_free_port_pcpu;
6148 } 6235 }
6149 netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr); 6236 netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);
6150 6237
@@ -6153,6 +6240,8 @@ static int mvpp2_port_probe(struct platform_device *pdev,
6153 priv->port_list[id] = port; 6240 priv->port_list[id] = port;
6154 return 0; 6241 return 0;
6155 6242
6243err_free_port_pcpu:
6244 free_percpu(port->pcpu);
6156err_free_txq_pcpu: 6245err_free_txq_pcpu:
6157 for (i = 0; i < txq_number; i++) 6246 for (i = 0; i < txq_number; i++)
6158 free_percpu(port->txqs[i]->pcpu); 6247 free_percpu(port->txqs[i]->pcpu);
@@ -6171,6 +6260,7 @@ static void mvpp2_port_remove(struct mvpp2_port *port)
6171 int i; 6260 int i;
6172 6261
6173 unregister_netdev(port->dev); 6262 unregister_netdev(port->dev);
6263 free_percpu(port->pcpu);
6174 free_percpu(port->stats); 6264 free_percpu(port->stats);
6175 for (i = 0; i < txq_number; i++) 6265 for (i = 0; i < txq_number; i++)
6176 free_percpu(port->txqs[i]->pcpu); 6266 free_percpu(port->txqs[i]->pcpu);
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 82040137d7d9..0a3202047569 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -686,6 +686,7 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
686{ 686{
687 struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd; 687 struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
688 struct mlx4_cmd_context *context; 688 struct mlx4_cmd_context *context;
689 long ret_wait;
689 int err = 0; 690 int err = 0;
690 691
691 down(&cmd->event_sem); 692 down(&cmd->event_sem);
@@ -711,8 +712,20 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
711 if (err) 712 if (err)
712 goto out_reset; 713 goto out_reset;
713 714
-	if (!wait_for_completion_timeout(&context->done,
-					 msecs_to_jiffies(timeout))) {
+	if (op == MLX4_CMD_SENSE_PORT) {
+		ret_wait =
+			wait_for_completion_interruptible_timeout(&context->done,
+								  msecs_to_jiffies(timeout));
+		if (ret_wait < 0) {
+			context->fw_status = 0;
+			context->out_param = 0;
+			context->result = 0;
+		}
+	} else {
+		ret_wait = (long)wait_for_completion_timeout(&context->done,
+							     msecs_to_jiffies(timeout));
+	}
+	if (!ret_wait) {
 		mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
 			  op);
 		if (op == MLX4_CMD_NOP) {
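
The mlx4_cmd_wait() change above waits interruptibly only for MLX4_CMD_SENSE_PORT and funnels both paths into one timeout check. A sketch of the return-value conventions that make that shared test work is shown below; it is illustrative only (my_wait is a hypothetical helper, not mlx4 code).

/* wait_for_completion_timeout() returns 0 on timeout or the remaining
 * jiffies; the interruptible variant can additionally return a negative
 * value (-ERESTARTSYS), so a signed type is needed and the negative case
 * must be handled before the "== 0 means timed out" test.
 */
static long my_wait(struct completion *done, unsigned int timeout_ms,
		    bool interruptible)
{
	long ret;

	if (interruptible) {
		ret = wait_for_completion_interruptible_timeout(done,
					msecs_to_jiffies(timeout_ms));
		if (ret < 0)		/* interrupted by a signal */
			return ret;
	} else {
		ret = (long)wait_for_completion_timeout(done,
					msecs_to_jiffies(timeout_ms));
	}

	return ret;			/* 0 means the wait timed out */
}
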
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 7a4f20bb7fcb..9c145dddd717 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -246,7 +246,6 @@ static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
246 246
247static inline bool mlx4_en_is_ring_empty(struct mlx4_en_rx_ring *ring) 247static inline bool mlx4_en_is_ring_empty(struct mlx4_en_rx_ring *ring)
248{ 248{
249 BUG_ON((u32)(ring->prod - ring->cons) > ring->actual_size);
250 return ring->prod == ring->cons; 249 return ring->prod == ring->cons;
251} 250}
252 251
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index aae13adfb492..8e81e53c370e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -601,7 +601,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
601 continue; 601 continue;
602 mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN to slave: %d, port:%d\n", 602 mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN to slave: %d, port:%d\n",
603 __func__, i, port); 603 __func__, i, port);
-			s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state;
+			s_info = &priv->mfunc.master.vf_oper[i].vport[port].state;
605 if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) { 605 if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
606 eqe->event.port_change.port = 606 eqe->event.port_change.port =
607 cpu_to_be32( 607 cpu_to_be32(
@@ -640,7 +640,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
640 continue; 640 continue;
641 if (i == mlx4_master_func_num(dev)) 641 if (i == mlx4_master_func_num(dev))
642 continue; 642 continue;
-			s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state;
+			s_info = &priv->mfunc.master.vf_oper[i].vport[port].state;
644 if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) { 644 if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
645 eqe->event.port_change.port = 645 eqe->event.port_change.port =
646 cpu_to_be32( 646 cpu_to_be32(
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 12fbfcb44d8a..29c2a017a450 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -2273,6 +2273,11 @@ static int mlx4_allocate_default_counters(struct mlx4_dev *dev)
2273 } else if (err == -ENOENT) { 2273 } else if (err == -ENOENT) {
2274 err = 0; 2274 err = 0;
2275 continue; 2275 continue;
2276 } else if (mlx4_is_slave(dev) && err == -EINVAL) {
2277 priv->def_counter[port] = MLX4_SINK_COUNTER_INDEX(dev);
2278 mlx4_warn(dev, "can't allocate counter from old PF driver, using index %d\n",
2279 MLX4_SINK_COUNTER_INDEX(dev));
2280 err = 0;
2276 } else { 2281 } else {
2277 mlx4_err(dev, "%s: failed to allocate default counter port %d err %d\n", 2282 mlx4_err(dev, "%s: failed to allocate default counter port %d err %d\n",
2278 __func__, port + 1, err); 2283 __func__, port + 1, err);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index afad529838de..06e3e1e54c35 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -391,6 +391,8 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
391 /* disable cmdif checksum */ 391 /* disable cmdif checksum */
392 MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0); 392 MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);
393 393
394 MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12);
395
394 err = set_caps(dev, set_ctx, set_sz); 396 err = set_caps(dev, set_ctx, set_sz);
395 397
396query_ex: 398query_ex:
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index f78909a00f15..09d2e16fd6b0 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -952,9 +952,8 @@ static int ks8842_alloc_dma_bufs(struct net_device *netdev)
952 952
953 sg_dma_address(&tx_ctl->sg) = dma_map_single(adapter->dev, 953 sg_dma_address(&tx_ctl->sg) = dma_map_single(adapter->dev,
954 tx_ctl->buf, DMA_BUFFER_SIZE, DMA_TO_DEVICE); 954 tx_ctl->buf, DMA_BUFFER_SIZE, DMA_TO_DEVICE);
-	err = dma_mapping_error(adapter->dev,
-		sg_dma_address(&tx_ctl->sg));
-	if (err) {
+	if (dma_mapping_error(adapter->dev, sg_dma_address(&tx_ctl->sg))) {
+		err = -ENOMEM;
958 sg_dma_address(&tx_ctl->sg) = 0; 957 sg_dma_address(&tx_ctl->sg) = 0;
959 goto err; 958 goto err;
960 } 959 }
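
The ks8842 fix above stops treating the result of dma_mapping_error() as an errno and returns an explicit -ENOMEM instead. A small sketch of the usual idiom, with hypothetical my_* names (not ks8842 code):

/* dma_mapping_error() only says whether the handle is usable; the driver
 * picks its own error code (commonly -ENOMEM) on failure.
 */
static int my_map_buf(struct device *dev, void *buf, size_t len,
		      dma_addr_t *handle)
{
	*handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *handle)) {
		*handle = 0;
		return -ENOMEM;
	}
	return 0;
}
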
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index 33669c29b341..753ea8bad953 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -1415,7 +1415,7 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
1415 if (fw->size & 0xF) { 1415 if (fw->size & 0xF) {
1416 addr = dest + size; 1416 addr = dest + size;
1417 for (i = 0; i < (fw->size & 0xF); i++) 1417 for (i = 0; i < (fw->size & 0xF); i++)
-			data[i] = temp[size + i];
+			data[i] = ((u8 *)temp)[size + i];
1419 for (; i < 16; i++) 1419 for (; i < 16; i++)
1420 data[i] = 0; 1420 data[i] = 0;
1421 ret = qlcnic_ms_mem_write128(adapter, addr, 1421 ret = qlcnic_ms_mem_write128(adapter, addr,
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 3df51faf18ae..f790f61ea78a 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -4875,10 +4875,12 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
4875 case RTL_GIGA_MAC_VER_46: 4875 case RTL_GIGA_MAC_VER_46:
4876 case RTL_GIGA_MAC_VER_47: 4876 case RTL_GIGA_MAC_VER_47:
4877 case RTL_GIGA_MAC_VER_48: 4877 case RTL_GIGA_MAC_VER_48:
4878 RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF);
4879 break;
4878 case RTL_GIGA_MAC_VER_49: 4880 case RTL_GIGA_MAC_VER_49:
4879 case RTL_GIGA_MAC_VER_50: 4881 case RTL_GIGA_MAC_VER_50:
4880 case RTL_GIGA_MAC_VER_51: 4882 case RTL_GIGA_MAC_VER_51:
-		RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF);
+		RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
4882 break; 4884 break;
4883 default: 4885 default:
4884 RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST); 4886 RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST);
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index fd9745714d90..78849dd4ef8e 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -228,9 +228,7 @@ static void ravb_ring_format(struct net_device *ndev, int q)
228 struct ravb_desc *desc = NULL; 228 struct ravb_desc *desc = NULL;
229 int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q]; 229 int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
230 int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q]; 230 int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q];
231 struct sk_buff *skb;
232 dma_addr_t dma_addr; 231 dma_addr_t dma_addr;
233 void *buffer;
234 int i; 232 int i;
235 233
236 priv->cur_rx[q] = 0; 234 priv->cur_rx[q] = 0;
@@ -241,41 +239,28 @@ static void ravb_ring_format(struct net_device *ndev, int q)
241 memset(priv->rx_ring[q], 0, rx_ring_size); 239 memset(priv->rx_ring[q], 0, rx_ring_size);
242 /* Build RX ring buffer */ 240 /* Build RX ring buffer */
243 for (i = 0; i < priv->num_rx_ring[q]; i++) { 241 for (i = 0; i < priv->num_rx_ring[q]; i++) {
244 priv->rx_skb[q][i] = NULL;
245 skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RAVB_ALIGN - 1);
246 if (!skb)
247 break;
248 ravb_set_buffer_align(skb);
249 /* RX descriptor */ 242 /* RX descriptor */
250 rx_desc = &priv->rx_ring[q][i]; 243 rx_desc = &priv->rx_ring[q][i];
251 /* The size of the buffer should be on 16-byte boundary. */ 244 /* The size of the buffer should be on 16-byte boundary. */
252 rx_desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16)); 245 rx_desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16));
-		dma_addr = dma_map_single(&ndev->dev, skb->data,
-					  ALIGN(PKT_BUF_SZ, 16),
-					  DMA_FROM_DEVICE);
-		if (dma_mapping_error(&ndev->dev, dma_addr)) {
-			dev_kfree_skb(skb);
-			break;
-		}
-		priv->rx_skb[q][i] = skb;
+		dma_addr = dma_map_single(&ndev->dev, priv->rx_skb[q][i]->data,
+					  ALIGN(PKT_BUF_SZ, 16),
+					  DMA_FROM_DEVICE);
+		/* We just set the data size to 0 for a failed mapping which
+		 * should prevent DMA from happening...
+		 */
+		if (dma_mapping_error(&ndev->dev, dma_addr))
+			rx_desc->ds_cc = cpu_to_le16(0);
261 rx_desc->dptr = cpu_to_le32(dma_addr); 254 rx_desc->dptr = cpu_to_le32(dma_addr);
262 rx_desc->die_dt = DT_FEMPTY; 255 rx_desc->die_dt = DT_FEMPTY;
263 } 256 }
264 rx_desc = &priv->rx_ring[q][i]; 257 rx_desc = &priv->rx_ring[q][i];
265 rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]); 258 rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
266 rx_desc->die_dt = DT_LINKFIX; /* type */ 259 rx_desc->die_dt = DT_LINKFIX; /* type */
267 priv->dirty_rx[q] = (u32)(i - priv->num_rx_ring[q]);
268 260
269 memset(priv->tx_ring[q], 0, tx_ring_size); 261 memset(priv->tx_ring[q], 0, tx_ring_size);
270 /* Build TX ring buffer */ 262 /* Build TX ring buffer */
271 for (i = 0; i < priv->num_tx_ring[q]; i++) { 263 for (i = 0; i < priv->num_tx_ring[q]; i++) {
272 priv->tx_skb[q][i] = NULL;
273 priv->tx_buffers[q][i] = NULL;
274 buffer = kmalloc(PKT_BUF_SZ + RAVB_ALIGN - 1, GFP_KERNEL);
275 if (!buffer)
276 break;
277 /* Aligned TX buffer */
278 priv->tx_buffers[q][i] = buffer;
279 tx_desc = &priv->tx_ring[q][i]; 264 tx_desc = &priv->tx_ring[q][i];
280 tx_desc->die_dt = DT_EEMPTY; 265 tx_desc->die_dt = DT_EEMPTY;
281 } 266 }
@@ -298,7 +283,10 @@ static void ravb_ring_format(struct net_device *ndev, int q)
298static int ravb_ring_init(struct net_device *ndev, int q) 283static int ravb_ring_init(struct net_device *ndev, int q)
299{ 284{
300 struct ravb_private *priv = netdev_priv(ndev); 285 struct ravb_private *priv = netdev_priv(ndev);
286 struct sk_buff *skb;
301 int ring_size; 287 int ring_size;
288 void *buffer;
289 int i;
302 290
303 /* Allocate RX and TX skb rings */ 291 /* Allocate RX and TX skb rings */
304 priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q], 292 priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
@@ -308,12 +296,28 @@ static int ravb_ring_init(struct net_device *ndev, int q)
308 if (!priv->rx_skb[q] || !priv->tx_skb[q]) 296 if (!priv->rx_skb[q] || !priv->tx_skb[q])
309 goto error; 297 goto error;
310 298
299 for (i = 0; i < priv->num_rx_ring[q]; i++) {
300 skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RAVB_ALIGN - 1);
301 if (!skb)
302 goto error;
303 ravb_set_buffer_align(skb);
304 priv->rx_skb[q][i] = skb;
305 }
306
311 /* Allocate rings for the aligned buffers */ 307 /* Allocate rings for the aligned buffers */
312 priv->tx_buffers[q] = kcalloc(priv->num_tx_ring[q], 308 priv->tx_buffers[q] = kcalloc(priv->num_tx_ring[q],
313 sizeof(*priv->tx_buffers[q]), GFP_KERNEL); 309 sizeof(*priv->tx_buffers[q]), GFP_KERNEL);
314 if (!priv->tx_buffers[q]) 310 if (!priv->tx_buffers[q])
315 goto error; 311 goto error;
316 312
313 for (i = 0; i < priv->num_tx_ring[q]; i++) {
314 buffer = kmalloc(PKT_BUF_SZ + RAVB_ALIGN - 1, GFP_KERNEL);
315 if (!buffer)
316 goto error;
317 /* Aligned TX buffer */
318 priv->tx_buffers[q][i] = buffer;
319 }
320
317 /* Allocate all RX descriptors. */ 321 /* Allocate all RX descriptors. */
318 ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1); 322 ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
319 priv->rx_ring[q] = dma_alloc_coherent(NULL, ring_size, 323 priv->rx_ring[q] = dma_alloc_coherent(NULL, ring_size,
@@ -524,6 +528,10 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
524 if (--boguscnt < 0) 528 if (--boguscnt < 0)
525 break; 529 break;
526 530
531 /* We use 0-byte descriptors to mark the DMA mapping errors */
532 if (!pkt_len)
533 continue;
534
527 if (desc_status & MSC_MC) 535 if (desc_status & MSC_MC)
528 stats->multicast++; 536 stats->multicast++;
529 537
@@ -543,10 +551,9 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
543 551
544 skb = priv->rx_skb[q][entry]; 552 skb = priv->rx_skb[q][entry];
545 priv->rx_skb[q][entry] = NULL; 553 priv->rx_skb[q][entry] = NULL;
-			dma_sync_single_for_cpu(&ndev->dev,
-						le32_to_cpu(desc->dptr),
-						ALIGN(PKT_BUF_SZ, 16),
-						DMA_FROM_DEVICE);
+			dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
+					 ALIGN(PKT_BUF_SZ, 16),
+					 DMA_FROM_DEVICE);
550 get_ts &= (q == RAVB_NC) ? 557 get_ts &= (q == RAVB_NC) ?
551 RAVB_RXTSTAMP_TYPE_V2_L2_EVENT : 558 RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
552 ~RAVB_RXTSTAMP_TYPE_V2_L2_EVENT; 559 ~RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
@@ -584,17 +591,15 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
584 if (!skb) 591 if (!skb)
585 break; /* Better luck next round. */ 592 break; /* Better luck next round. */
586 ravb_set_buffer_align(skb); 593 ravb_set_buffer_align(skb);
587 dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
588 ALIGN(PKT_BUF_SZ, 16),
589 DMA_FROM_DEVICE);
590 dma_addr = dma_map_single(&ndev->dev, skb->data, 594 dma_addr = dma_map_single(&ndev->dev, skb->data,
591 le16_to_cpu(desc->ds_cc), 595 le16_to_cpu(desc->ds_cc),
592 DMA_FROM_DEVICE); 596 DMA_FROM_DEVICE);
593 skb_checksum_none_assert(skb); 597 skb_checksum_none_assert(skb);
-			if (dma_mapping_error(&ndev->dev, dma_addr)) {
-				dev_kfree_skb_any(skb);
-				break;
-			}
+			/* We just set the data size to 0 for a failed mapping
+			 * which should prevent DMA from happening...
+			 */
+			if (dma_mapping_error(&ndev->dev, dma_addr))
+				desc->ds_cc = cpu_to_le16(0);
598 desc->dptr = cpu_to_le32(dma_addr); 603 desc->dptr = cpu_to_le32(dma_addr);
599 priv->rx_skb[q][entry] = skb; 604 priv->rx_skb[q][entry] = skb;
600 } 605 }
@@ -1279,7 +1284,6 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1279 u32 dma_addr; 1284 u32 dma_addr;
1280 void *buffer; 1285 void *buffer;
1281 u32 entry; 1286 u32 entry;
1282 u32 tccr;
1283 1287
1284 spin_lock_irqsave(&priv->lock, flags); 1288 spin_lock_irqsave(&priv->lock, flags);
1285 if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q]) { 1289 if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q]) {
@@ -1328,9 +1332,7 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1328 dma_wmb(); 1332 dma_wmb();
1329 desc->die_dt = DT_FSINGLE; 1333 desc->die_dt = DT_FSINGLE;
1330 1334
-	tccr = ravb_read(ndev, TCCR);
-	if (!(tccr & (TCCR_TSRQ0 << q)))
-		ravb_write(ndev, tccr | (TCCR_TSRQ0 << q), TCCR);
+	ravb_write(ndev, ravb_read(ndev, TCCR) | (TCCR_TSRQ0 << q), TCCR);
1334 1336
1335 priv->cur_tx[q]++; 1337 priv->cur_tx[q]++;
1336 if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q] && 1338 if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q] &&
diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
index 2d8578cade03..2e7f9a2834be 100644
--- a/drivers/net/ethernet/rocker/rocker.c
+++ b/drivers/net/ethernet/rocker/rocker.c
@@ -4821,6 +4821,7 @@ static void rocker_remove_ports(const struct rocker *rocker)
4821 rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE, 4821 rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE,
4822 ROCKER_OP_FLAG_REMOVE); 4822 ROCKER_OP_FLAG_REMOVE);
4823 unregister_netdev(rocker_port->dev); 4823 unregister_netdev(rocker_port->dev);
4824 free_netdev(rocker_port->dev);
4824 } 4825 }
4825 kfree(rocker->ports); 4826 kfree(rocker->ports);
4826} 4827}
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 847643455468..605cc8948594 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -101,6 +101,11 @@ static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx)
101 return resource_size(&efx->pci_dev->resource[bar]); 101 return resource_size(&efx->pci_dev->resource[bar]);
102} 102}
103 103
104static bool efx_ef10_is_vf(struct efx_nic *efx)
105{
106 return efx->type->is_vf;
107}
108
104static int efx_ef10_get_pf_index(struct efx_nic *efx) 109static int efx_ef10_get_pf_index(struct efx_nic *efx)
105{ 110{
106 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN); 111 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
@@ -677,6 +682,48 @@ static int efx_ef10_probe_pf(struct efx_nic *efx)
677 return efx_ef10_probe(efx); 682 return efx_ef10_probe(efx);
678} 683}
679 684
685int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id)
686{
687 MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_ALLOC_IN_LEN);
688
689 MCDI_SET_DWORD(inbuf, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
690 return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_ALLOC, inbuf, sizeof(inbuf),
691 NULL, 0, NULL);
692}
693
694int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id)
695{
696 MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_FREE_IN_LEN);
697
698 MCDI_SET_DWORD(inbuf, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id);
699 return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_FREE, inbuf, sizeof(inbuf),
700 NULL, 0, NULL);
701}
702
703int efx_ef10_vport_add_mac(struct efx_nic *efx,
704 unsigned int port_id, u8 *mac)
705{
706 MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN);
707
708 MCDI_SET_DWORD(inbuf, VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID, port_id);
709 ether_addr_copy(MCDI_PTR(inbuf, VPORT_ADD_MAC_ADDRESS_IN_MACADDR), mac);
710
711 return efx_mcdi_rpc(efx, MC_CMD_VPORT_ADD_MAC_ADDRESS, inbuf,
712 sizeof(inbuf), NULL, 0, NULL);
713}
714
715int efx_ef10_vport_del_mac(struct efx_nic *efx,
716 unsigned int port_id, u8 *mac)
717{
718 MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN);
719
720 MCDI_SET_DWORD(inbuf, VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID, port_id);
721 ether_addr_copy(MCDI_PTR(inbuf, VPORT_DEL_MAC_ADDRESS_IN_MACADDR), mac);
722
723 return efx_mcdi_rpc(efx, MC_CMD_VPORT_DEL_MAC_ADDRESS, inbuf,
724 sizeof(inbuf), NULL, 0, NULL);
725}
726
680#ifdef CONFIG_SFC_SRIOV 727#ifdef CONFIG_SFC_SRIOV
681static int efx_ef10_probe_vf(struct efx_nic *efx) 728static int efx_ef10_probe_vf(struct efx_nic *efx)
682{ 729{
@@ -3804,6 +3851,72 @@ static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
3804 WARN_ON(remove_failed); 3851 WARN_ON(remove_failed);
3805} 3852}
3806 3853
3854static int efx_ef10_vport_set_mac_address(struct efx_nic *efx)
3855{
3856 struct efx_ef10_nic_data *nic_data = efx->nic_data;
3857 u8 mac_old[ETH_ALEN];
3858 int rc, rc2;
3859
3860 /* Only reconfigure a PF-created vport */
3861 if (is_zero_ether_addr(nic_data->vport_mac))
3862 return 0;
3863
3864 efx_device_detach_sync(efx);
3865 efx_net_stop(efx->net_dev);
3866 down_write(&efx->filter_sem);
3867 efx_ef10_filter_table_remove(efx);
3868 up_write(&efx->filter_sem);
3869
3870 rc = efx_ef10_vadaptor_free(efx, nic_data->vport_id);
3871 if (rc)
3872 goto restore_filters;
3873
3874 ether_addr_copy(mac_old, nic_data->vport_mac);
3875 rc = efx_ef10_vport_del_mac(efx, nic_data->vport_id,
3876 nic_data->vport_mac);
3877 if (rc)
3878 goto restore_vadaptor;
3879
3880 rc = efx_ef10_vport_add_mac(efx, nic_data->vport_id,
3881 efx->net_dev->dev_addr);
3882 if (!rc) {
3883 ether_addr_copy(nic_data->vport_mac, efx->net_dev->dev_addr);
3884 } else {
3885 rc2 = efx_ef10_vport_add_mac(efx, nic_data->vport_id, mac_old);
3886 if (rc2) {
3887 /* Failed to add original MAC, so clear vport_mac */
3888 eth_zero_addr(nic_data->vport_mac);
3889 goto reset_nic;
3890 }
3891 }
3892
3893restore_vadaptor:
3894 rc2 = efx_ef10_vadaptor_alloc(efx, nic_data->vport_id);
3895 if (rc2)
3896 goto reset_nic;
3897restore_filters:
3898 down_write(&efx->filter_sem);
3899 rc2 = efx_ef10_filter_table_probe(efx);
3900 up_write(&efx->filter_sem);
3901 if (rc2)
3902 goto reset_nic;
3903
3904 rc2 = efx_net_open(efx->net_dev);
3905 if (rc2)
3906 goto reset_nic;
3907
3908 netif_device_attach(efx->net_dev);
3909
3910 return rc;
3911
3912reset_nic:
3913 netif_err(efx, drv, efx->net_dev,
3914 "Failed to restore when changing MAC address - scheduling reset\n");
3915 efx_schedule_reset(efx, RESET_TYPE_DATAPATH);
3916
3917 return rc ? rc : rc2;
3918}
3919
3807static int efx_ef10_set_mac_address(struct efx_nic *efx) 3920static int efx_ef10_set_mac_address(struct efx_nic *efx)
3808{ 3921{
3809 MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_SET_MAC_IN_LEN); 3922 MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_SET_MAC_IN_LEN);
@@ -3820,8 +3933,8 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
3820 efx->net_dev->dev_addr); 3933 efx->net_dev->dev_addr);
3821 MCDI_SET_DWORD(inbuf, VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID, 3934 MCDI_SET_DWORD(inbuf, VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID,
3822 nic_data->vport_id); 3935 nic_data->vport_id);
-	rc = efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_SET_MAC, inbuf,
-			  sizeof(inbuf), NULL, 0, NULL);
+	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_VADAPTOR_SET_MAC, inbuf,
+				sizeof(inbuf), NULL, 0, NULL);
3825 3938
3826 efx_ef10_filter_table_probe(efx); 3939 efx_ef10_filter_table_probe(efx);
3827 up_write(&efx->filter_sem); 3940 up_write(&efx->filter_sem);
@@ -3829,38 +3942,27 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
3829 efx_net_open(efx->net_dev); 3942 efx_net_open(efx->net_dev);
3830 netif_device_attach(efx->net_dev); 3943 netif_device_attach(efx->net_dev);
3831 3944
-#if !defined(CONFIG_SFC_SRIOV)
-	if (rc == -EPERM)
-		netif_err(efx, drv, efx->net_dev,
-			  "Cannot change MAC address; use sfboot to enable mac-spoofing"
-			  " on this interface\n");
-#else
-	if (rc == -EPERM) {
+#ifdef CONFIG_SFC_SRIOV
+	if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) {
 		struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
 
-		/* Switch to PF and change MAC address on vport */
-		if (efx->pci_dev->is_virtfn && pci_dev_pf) {
-			struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
+		if (rc == -EPERM) {
+			struct efx_nic *efx_pf;
 
-			if (!efx_ef10_sriov_set_vf_mac(efx_pf,
-						       nic_data->vf_index,
-						       efx->net_dev->dev_addr))
-				return 0;
-		}
-		netif_err(efx, drv, efx->net_dev,
-			  "Cannot change MAC address; use sfboot to enable mac-spoofing"
-			  " on this interface\n");
-	} else if (efx->pci_dev->is_virtfn) {
-		/* Successfully changed by VF (with MAC spoofing), so update the
-		 * parent PF if possible.
-		 */
-		struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
+			/* Switch to PF and change MAC address on vport */
+			efx_pf = pci_get_drvdata(pci_dev_pf);
 
-		if (pci_dev_pf) {
+			rc = efx_ef10_sriov_set_vf_mac(efx_pf,
+						       nic_data->vf_index,
+						       efx->net_dev->dev_addr);
+		} else if (!rc) {
 			struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
 			struct efx_ef10_nic_data *nic_data = efx_pf->nic_data;
 			unsigned int i;
 
+			/* MAC address successfully changed by VF (with MAC
+			 * spoofing) so update the parent PF if possible.
+			 */
 			for (i = 0; i < efx_pf->vf_count; ++i) {
 				struct ef10_vf *vf = nic_data->vf + i;
 
@@ -3871,8 +3973,24 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
 				}
 			}
 		}
-	}
+	} else
 #endif
+	if (rc == -EPERM) {
+		netif_err(efx, drv, efx->net_dev,
+			  "Cannot change MAC address; use sfboot to enable"
+			  " mac-spoofing on this interface\n");
+	} else if (rc == -ENOSYS && !efx_ef10_is_vf(efx)) {
+		/* If the active MCFW does not support MC_CMD_VADAPTOR_SET_MAC
+		 * fall-back to the method of changing the MAC address on the
+		 * vport. This only applies to PFs because such versions of
+		 * MCFW do not support VFs.
+		 */
+		rc = efx_ef10_vport_set_mac_address(efx);
+	} else {
+		efx_mcdi_display_error(efx, MC_CMD_VADAPTOR_SET_MAC,
+				       sizeof(inbuf), NULL, 0, rc);
+	}
+
 	return rc;
 }
 
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c
index 6c9b6e45509a..3c17f274e802 100644
--- a/drivers/net/ethernet/sfc/ef10_sriov.c
+++ b/drivers/net/ethernet/sfc/ef10_sriov.c
@@ -29,30 +29,6 @@ static int efx_ef10_evb_port_assign(struct efx_nic *efx, unsigned int port_id,
29 NULL, 0, NULL); 29 NULL, 0, NULL);
30} 30}
31 31
32static int efx_ef10_vport_add_mac(struct efx_nic *efx,
33 unsigned int port_id, u8 *mac)
34{
35 MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN);
36
37 MCDI_SET_DWORD(inbuf, VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID, port_id);
38 ether_addr_copy(MCDI_PTR(inbuf, VPORT_ADD_MAC_ADDRESS_IN_MACADDR), mac);
39
40 return efx_mcdi_rpc(efx, MC_CMD_VPORT_ADD_MAC_ADDRESS, inbuf,
41 sizeof(inbuf), NULL, 0, NULL);
42}
43
44static int efx_ef10_vport_del_mac(struct efx_nic *efx,
45 unsigned int port_id, u8 *mac)
46{
47 MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN);
48
49 MCDI_SET_DWORD(inbuf, VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID, port_id);
50 ether_addr_copy(MCDI_PTR(inbuf, VPORT_DEL_MAC_ADDRESS_IN_MACADDR), mac);
51
52 return efx_mcdi_rpc(efx, MC_CMD_VPORT_DEL_MAC_ADDRESS, inbuf,
53 sizeof(inbuf), NULL, 0, NULL);
54}
55
56static int efx_ef10_vswitch_alloc(struct efx_nic *efx, unsigned int port_id, 32static int efx_ef10_vswitch_alloc(struct efx_nic *efx, unsigned int port_id,
57 unsigned int vswitch_type) 33 unsigned int vswitch_type)
58{ 34{
@@ -136,24 +112,6 @@ static int efx_ef10_vport_free(struct efx_nic *efx, unsigned int port_id)
136 NULL, 0, NULL); 112 NULL, 0, NULL);
137} 113}
138 114
139static int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id)
140{
141 MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_ALLOC_IN_LEN);
142
143 MCDI_SET_DWORD(inbuf, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
144 return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_ALLOC, inbuf, sizeof(inbuf),
145 NULL, 0, NULL);
146}
147
148static int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id)
149{
150 MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_FREE_IN_LEN);
151
152 MCDI_SET_DWORD(inbuf, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id);
153 return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_FREE, inbuf, sizeof(inbuf),
154 NULL, 0, NULL);
155}
156
157static void efx_ef10_sriov_free_vf_vports(struct efx_nic *efx) 115static void efx_ef10_sriov_free_vf_vports(struct efx_nic *efx)
158{ 116{
159 struct efx_ef10_nic_data *nic_data = efx->nic_data; 117 struct efx_ef10_nic_data *nic_data = efx->nic_data;
@@ -640,21 +598,21 @@ int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i, u16 vlan,
640 MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL, 598 MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL,
641 vf->vlan, &vf->vport_id); 599 vf->vlan, &vf->vport_id);
642 if (rc) 600 if (rc)
-		goto reset_nic;
+		goto reset_nic_up_write;
644 602
645restore_mac: 603restore_mac:
646 if (!is_zero_ether_addr(vf->mac)) { 604 if (!is_zero_ether_addr(vf->mac)) {
647 rc2 = efx_ef10_vport_add_mac(efx, vf->vport_id, vf->mac); 605 rc2 = efx_ef10_vport_add_mac(efx, vf->vport_id, vf->mac);
648 if (rc2) { 606 if (rc2) {
649 eth_zero_addr(vf->mac); 607 eth_zero_addr(vf->mac);
-			goto reset_nic;
+			goto reset_nic_up_write;
651 } 609 }
652 } 610 }
653 611
654restore_evb_port: 612restore_evb_port:
655 rc2 = efx_ef10_evb_port_assign(efx, vf->vport_id, vf_i); 613 rc2 = efx_ef10_evb_port_assign(efx, vf->vport_id, vf_i);
656 if (rc2) 614 if (rc2)
-		goto reset_nic;
+		goto reset_nic_up_write;
658 else 616 else
659 vf->vport_assigned = 1; 617 vf->vport_assigned = 1;
660 618
@@ -662,14 +620,16 @@ restore_vadaptor:
662 if (vf->efx) { 620 if (vf->efx) {
663 rc2 = efx_ef10_vadaptor_alloc(vf->efx, EVB_PORT_ID_ASSIGNED); 621 rc2 = efx_ef10_vadaptor_alloc(vf->efx, EVB_PORT_ID_ASSIGNED);
664 if (rc2) 622 if (rc2)
-			goto reset_nic;
+			goto reset_nic_up_write;
666 } 624 }
667 625
668restore_filters: 626restore_filters:
669 if (vf->efx) { 627 if (vf->efx) {
670 rc2 = vf->efx->type->filter_table_probe(vf->efx); 628 rc2 = vf->efx->type->filter_table_probe(vf->efx);
671 if (rc2) 629 if (rc2)
672 goto reset_nic; 630 goto reset_nic_up_write;
631
632 up_write(&vf->efx->filter_sem);
673 633
674 up_write(&vf->efx->filter_sem); 634 up_write(&vf->efx->filter_sem);
675 635
@@ -681,9 +641,12 @@ restore_filters:
681 } 641 }
682 return rc; 642 return rc;
683 643
+reset_nic_up_write:
+	if (vf->efx)
+		up_write(&vf->efx->filter_sem);
+
 reset_nic:
 	if (vf->efx) {
-		up_write(&vf->efx->filter_sem);
687 netif_err(efx, drv, efx->net_dev, 650 netif_err(efx, drv, efx->net_dev,
688 "Failed to restore VF - scheduling reset.\n"); 651 "Failed to restore VF - scheduling reset.\n");
689 efx_schedule_reset(vf->efx, RESET_TYPE_DATAPATH); 652 efx_schedule_reset(vf->efx, RESET_TYPE_DATAPATH);
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.h b/drivers/net/ethernet/sfc/ef10_sriov.h
index db4ef537c610..6d25b92cb45e 100644
--- a/drivers/net/ethernet/sfc/ef10_sriov.h
+++ b/drivers/net/ethernet/sfc/ef10_sriov.h
@@ -65,5 +65,11 @@ int efx_ef10_vswitching_restore_pf(struct efx_nic *efx);
65int efx_ef10_vswitching_restore_vf(struct efx_nic *efx); 65int efx_ef10_vswitching_restore_vf(struct efx_nic *efx);
66void efx_ef10_vswitching_remove_pf(struct efx_nic *efx); 66void efx_ef10_vswitching_remove_pf(struct efx_nic *efx);
67void efx_ef10_vswitching_remove_vf(struct efx_nic *efx); 67void efx_ef10_vswitching_remove_vf(struct efx_nic *efx);
68int efx_ef10_vport_add_mac(struct efx_nic *efx,
69 unsigned int port_id, u8 *mac);
70int efx_ef10_vport_del_mac(struct efx_nic *efx,
71 unsigned int port_id, u8 *mac);
72int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id);
73int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id);
68 74
69#endif /* EF10_SRIOV_H */ 75#endif /* EF10_SRIOV_H */
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 804b9ad553d3..03bc03b67f08 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -245,11 +245,17 @@ static int efx_check_disabled(struct efx_nic *efx)
245 */ 245 */
246static int efx_process_channel(struct efx_channel *channel, int budget) 246static int efx_process_channel(struct efx_channel *channel, int budget)
247{ 247{
248 struct efx_tx_queue *tx_queue;
248 int spent; 249 int spent;
249 250
250 if (unlikely(!channel->enabled)) 251 if (unlikely(!channel->enabled))
251 return 0; 252 return 0;
252 253
254 efx_for_each_channel_tx_queue(tx_queue, channel) {
255 tx_queue->pkts_compl = 0;
256 tx_queue->bytes_compl = 0;
257 }
258
253 spent = efx_nic_process_eventq(channel, budget); 259 spent = efx_nic_process_eventq(channel, budget);
254 if (spent && efx_channel_has_rx_queue(channel)) { 260 if (spent && efx_channel_has_rx_queue(channel)) {
255 struct efx_rx_queue *rx_queue = 261 struct efx_rx_queue *rx_queue =
@@ -259,6 +265,14 @@ static int efx_process_channel(struct efx_channel *channel, int budget)
259 efx_fast_push_rx_descriptors(rx_queue, true); 265 efx_fast_push_rx_descriptors(rx_queue, true);
260 } 266 }
261 267
268 /* Update BQL */
269 efx_for_each_channel_tx_queue(tx_queue, channel) {
270 if (tx_queue->bytes_compl) {
271 netdev_tx_completed_queue(tx_queue->core_txq,
272 tx_queue->pkts_compl, tx_queue->bytes_compl);
273 }
274 }
275
262 return spent; 276 return spent;
263} 277}
264 278
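
The sfc changes above batch byte-queue-limit (BQL) updates: per-queue pkts_compl/bytes_compl counters are accumulated while the event queue is processed and reported to the stack once per NAPI poll instead of once per completion. A simplified sketch of that pattern follows; the my_* names are illustrative, not sfc symbols.

/* Accumulate during event processing, report once per poll. */
struct my_txq_stats {
	unsigned int pkts_compl;
	unsigned int bytes_compl;
};

static void my_tx_event(struct my_txq_stats *s, unsigned int pkts,
			unsigned int bytes)
{
	s->pkts_compl += pkts;		/* do not report yet */
	s->bytes_compl += bytes;
}

static void my_poll_done(struct netdev_queue *txq, struct my_txq_stats *s)
{
	if (s->bytes_compl)
		netdev_tx_completed_queue(txq, s->pkts_compl, s->bytes_compl);
	s->pkts_compl = 0;
	s->bytes_compl = 0;
}

Batching keeps the netdev_tx_completed_queue() bookkeeping off the hot completion path while still letting BQL see the full totals for the poll.
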
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index d72f522bf9c3..47d1e3a96522 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -241,6 +241,8 @@ struct efx_tx_queue {
241 unsigned int read_count ____cacheline_aligned_in_smp; 241 unsigned int read_count ____cacheline_aligned_in_smp;
242 unsigned int old_write_count; 242 unsigned int old_write_count;
243 unsigned int merge_events; 243 unsigned int merge_events;
244 unsigned int bytes_compl;
245 unsigned int pkts_compl;
244 246
245 /* Members used only on the xmit path */ 247 /* Members used only on the xmit path */
246 unsigned int insert_count ____cacheline_aligned_in_smp; 248 unsigned int insert_count ____cacheline_aligned_in_smp;
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index aaf2987512b5..1833a0146571 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -617,7 +617,8 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
617 EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask); 617 EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);
618 618
619 efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl); 619 efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
620 netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl); 620 tx_queue->pkts_compl += pkts_compl;
621 tx_queue->bytes_compl += bytes_compl;
621 622
622 if (pkts_compl > 1) 623 if (pkts_compl > 1)
623 ++tx_queue->merge_events; 624 ++tx_queue->merge_events;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
index 7e3129e7f143..f0e4bb4e3ec5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
@@ -42,7 +42,7 @@
42#define NSS_COMMON_CLK_DIV_MASK 0x7f 42#define NSS_COMMON_CLK_DIV_MASK 0x7f
43 43
44#define NSS_COMMON_CLK_SRC_CTRL 0x14 44#define NSS_COMMON_CLK_SRC_CTRL 0x14
-#define NSS_COMMON_CLK_SRC_CTRL_OFFSET(x)	(1 << x)
+#define NSS_COMMON_CLK_SRC_CTRL_OFFSET(x)	(x)
46/* Mode is coded on 1 bit but is different depending on the MAC ID: 46/* Mode is coded on 1 bit but is different depending on the MAC ID:
47 * MAC0: QSGMII=0 RGMII=1 47 * MAC0: QSGMII=0 RGMII=1
48 * MAC1: QSGMII=0 SGMII=0 RGMII=1 48 * MAC1: QSGMII=0 SGMII=0 RGMII=1
@@ -291,7 +291,7 @@ static void *ipq806x_gmac_setup(struct platform_device *pdev)
291 291
292 /* Configure the clock src according to the mode */ 292 /* Configure the clock src according to the mode */
293 regmap_read(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, &val); 293 regmap_read(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, &val);
-	val &= ~NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id);
+	val &= ~(1 << NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id));
295 switch (gmac->phy_mode) { 295 switch (gmac->phy_mode) {
296 case PHY_INTERFACE_MODE_RGMII: 296 case PHY_INTERFACE_MODE_RGMII:
297 val |= NSS_COMMON_CLK_SRC_CTRL_RGMII(gmac->id) << 297 val |= NSS_COMMON_CLK_SRC_CTRL_RGMII(gmac->id) <<
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 50f7a7a26821..864b476f7fd5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2843,7 +2843,7 @@ int stmmac_dvr_probe(struct device *device,
2843 if (res->mac) 2843 if (res->mac)
2844 memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN); 2844 memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
2845 2845
-	dev_set_drvdata(device, priv);
+	dev_set_drvdata(device, priv->dev);
2847 2847
2848 /* Verify driver arguments */ 2848 /* Verify driver arguments */
2849 stmmac_verify_args(); 2849 stmmac_verify_args();
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index f3918c7e7eeb..bcdc8955c719 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -413,3 +413,7 @@ static int stmmac_pltfr_resume(struct device *dev)
413SIMPLE_DEV_PM_OPS(stmmac_pltfr_pm_ops, stmmac_pltfr_suspend, 413SIMPLE_DEV_PM_OPS(stmmac_pltfr_pm_ops, stmmac_pltfr_suspend,
414 stmmac_pltfr_resume); 414 stmmac_pltfr_resume);
415EXPORT_SYMBOL_GPL(stmmac_pltfr_pm_ops); 415EXPORT_SYMBOL_GPL(stmmac_pltfr_pm_ops);
416
417MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet platform support");
418MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
419MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 0c5842aeb807..ab6051a43134 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -6658,10 +6658,8 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
6658 struct sk_buff *skb_new; 6658 struct sk_buff *skb_new;
6659 6659
6660 skb_new = skb_realloc_headroom(skb, len); 6660 skb_new = skb_realloc_headroom(skb, len);
-		if (!skb_new) {
-			rp->tx_errors++;
+		if (!skb_new)
 			goto out_drop;
-		}
6665 kfree_skb(skb); 6663 kfree_skb(skb);
6666 skb = skb_new; 6664 skb = skb_new;
6667 } else 6665 } else
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 462820514fae..d155bf2573cd 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -138,19 +138,6 @@ do { \
138#define CPSW_CMINTMAX_INTVL (1000 / CPSW_CMINTMIN_CNT) 138#define CPSW_CMINTMAX_INTVL (1000 / CPSW_CMINTMIN_CNT)
139#define CPSW_CMINTMIN_INTVL ((1000 / CPSW_CMINTMAX_CNT) + 1) 139#define CPSW_CMINTMIN_INTVL ((1000 / CPSW_CMINTMAX_CNT) + 1)
140 140
141#define cpsw_enable_irq(priv) \
142 do { \
143 u32 i; \
144 for (i = 0; i < priv->num_irqs; i++) \
145 enable_irq(priv->irqs_table[i]); \
146 } while (0)
147#define cpsw_disable_irq(priv) \
148 do { \
149 u32 i; \
150 for (i = 0; i < priv->num_irqs; i++) \
151 disable_irq_nosync(priv->irqs_table[i]); \
152 } while (0)
153
154#define cpsw_slave_index(priv) \ 141#define cpsw_slave_index(priv) \
155 ((priv->data.dual_emac) ? priv->emac_port : \ 142 ((priv->data.dual_emac) ? priv->emac_port : \
156 priv->data.active_slave) 143 priv->data.active_slave)
@@ -509,9 +496,11 @@ static const struct cpsw_stats cpsw_gstrings_stats[] = {
509 (func)(slave++, ##arg); \ 496 (func)(slave++, ##arg); \
510 } while (0) 497 } while (0)
511#define cpsw_get_slave_ndev(priv, __slave_no__) \ 498#define cpsw_get_slave_ndev(priv, __slave_no__) \
-	(priv->slaves[__slave_no__].ndev)
+	((__slave_no__ < priv->data.slaves) ? \
+	 priv->slaves[__slave_no__].ndev : NULL)
 #define cpsw_get_slave_priv(priv, __slave_no__) \
-	((priv->slaves[__slave_no__].ndev) ? \
+	(((__slave_no__ < priv->data.slaves) && \
+	  (priv->slaves[__slave_no__].ndev)) ? \
 	 netdev_priv(priv->slaves[__slave_no__].ndev) : NULL) \
516 505
517#define cpsw_dual_emac_src_port_detect(status, priv, ndev, skb) \ 506#define cpsw_dual_emac_src_port_detect(status, priv, ndev, skb) \
@@ -781,7 +770,7 @@ static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
781 770
782 cpsw_intr_disable(priv); 771 cpsw_intr_disable(priv);
783 if (priv->irq_enabled == true) { 772 if (priv->irq_enabled == true) {
-		cpsw_disable_irq(priv);
+		disable_irq_nosync(priv->irqs_table[0]);
785 priv->irq_enabled = false; 774 priv->irq_enabled = false;
786 } 775 }
787 776
@@ -804,9 +793,7 @@ static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
804static int cpsw_poll(struct napi_struct *napi, int budget) 793static int cpsw_poll(struct napi_struct *napi, int budget)
805{ 794{
806 struct cpsw_priv *priv = napi_to_priv(napi); 795 struct cpsw_priv *priv = napi_to_priv(napi);
-	int num_tx, num_rx;
-
-	num_tx = cpdma_chan_process(priv->txch, 128);
+	int num_rx;
810 797
811 num_rx = cpdma_chan_process(priv->rxch, budget); 798 num_rx = cpdma_chan_process(priv->rxch, budget);
812 if (num_rx < budget) { 799 if (num_rx < budget) {
@@ -817,13 +804,12 @@ static int cpsw_poll(struct napi_struct *napi, int budget)
817 prim_cpsw = cpsw_get_slave_priv(priv, 0); 804 prim_cpsw = cpsw_get_slave_priv(priv, 0);
818 if (prim_cpsw->irq_enabled == false) { 805 if (prim_cpsw->irq_enabled == false) {
819 prim_cpsw->irq_enabled = true; 806 prim_cpsw->irq_enabled = true;
-			cpsw_enable_irq(priv);
+			enable_irq(priv->irqs_table[0]);
821 } 808 }
822 } 809 }
823 810
-	if (num_rx || num_tx)
-		cpsw_dbg(priv, intr, "poll %d rx, %d tx pkts\n",
-			 num_rx, num_tx);
+	if (num_rx)
+		cpsw_dbg(priv, intr, "poll %d rx pkts\n", num_rx);
827 813
828 return num_rx; 814 return num_rx;
829} 815}
@@ -1333,7 +1319,7 @@ static int cpsw_ndo_open(struct net_device *ndev)
1333 if (prim_cpsw->irq_enabled == false) { 1319 if (prim_cpsw->irq_enabled == false) {
1334 if ((priv == prim_cpsw) || !netif_running(prim_cpsw->ndev)) { 1320 if ((priv == prim_cpsw) || !netif_running(prim_cpsw->ndev)) {
1335 prim_cpsw->irq_enabled = true; 1321 prim_cpsw->irq_enabled = true;
-			cpsw_enable_irq(prim_cpsw);
+			enable_irq(prim_cpsw->irqs_table[0]);
1337 } 1323 }
1338 } 1324 }
1339 1325
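
The cpsw hunks above drop the multi-IRQ enable/disable macros in favour of masking a single RX interrupt around NAPI polling. A generic sketch of that IRQ/NAPI handshake is shown below; the my_* names are placeholders, not cpsw symbols, and the snippet assumes <linux/interrupt.h> and <linux/netdevice.h>.

/* Illustrative only: mask the hard IRQ while NAPI drains the ring. */
struct my_priv {
	struct napi_struct napi;
	int irq;
};

static int my_process_rx(struct my_priv *priv, int budget)
{
	/* placeholder for the driver's RX descriptor processing */
	return 0;
}

static irqreturn_t my_rx_interrupt(int irq, void *dev_id)
{
	struct my_priv *priv = dev_id;

	disable_irq_nosync(irq);	/* safe from hard-IRQ context */
	napi_schedule(&priv->napi);
	return IRQ_HANDLED;
}

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, napi);
	int done = my_process_rx(priv, budget);

	if (done < budget) {
		napi_complete(napi);
		enable_irq(priv->irq);	/* unmask once the ring is drained */
	}
	return done;
}
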
diff --git a/drivers/net/ethernet/ti/netcp.h b/drivers/net/ethernet/ti/netcp.h
index bbacf5cccec2..bb1bb72121c0 100644
--- a/drivers/net/ethernet/ti/netcp.h
+++ b/drivers/net/ethernet/ti/netcp.h
@@ -85,7 +85,6 @@ struct netcp_intf {
85 struct list_head rxhook_list_head; 85 struct list_head rxhook_list_head;
86 unsigned int rx_queue_id; 86 unsigned int rx_queue_id;
87 void *rx_fdq[KNAV_DMA_FDQ_PER_CHAN]; 87 void *rx_fdq[KNAV_DMA_FDQ_PER_CHAN];
88 u32 rx_buffer_sizes[KNAV_DMA_FDQ_PER_CHAN];
89 struct napi_struct rx_napi; 88 struct napi_struct rx_napi;
90 struct napi_struct tx_napi; 89 struct napi_struct tx_napi;
91 90
@@ -223,6 +222,7 @@ void *netcp_device_find_module(struct netcp_device *netcp_device,
223 222
224/* SGMII functions */ 223/* SGMII functions */
225int netcp_sgmii_reset(void __iomem *sgmii_ofs, int port); 224int netcp_sgmii_reset(void __iomem *sgmii_ofs, int port);
225bool netcp_sgmii_rtreset(void __iomem *sgmii_ofs, int port, bool set);
226int netcp_sgmii_get_port_link(void __iomem *sgmii_ofs, int port); 226int netcp_sgmii_get_port_link(void __iomem *sgmii_ofs, int port);
227int netcp_sgmii_config(void __iomem *sgmii_ofs, int port, u32 interface); 227int netcp_sgmii_config(void __iomem *sgmii_ofs, int port, u32 interface);
228 228
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 5ec4ed3f6c8d..4755838c6137 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -34,6 +34,7 @@
34#define NETCP_SOP_OFFSET (NET_IP_ALIGN + NET_SKB_PAD) 34#define NETCP_SOP_OFFSET (NET_IP_ALIGN + NET_SKB_PAD)
35#define NETCP_NAPI_WEIGHT 64 35#define NETCP_NAPI_WEIGHT 64
36#define NETCP_TX_TIMEOUT (5 * HZ) 36#define NETCP_TX_TIMEOUT (5 * HZ)
37#define NETCP_PACKET_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN)
37#define NETCP_MIN_PACKET_SIZE ETH_ZLEN 38#define NETCP_MIN_PACKET_SIZE ETH_ZLEN
38#define NETCP_MAX_MCAST_ADDR 16 39#define NETCP_MAX_MCAST_ADDR 16
39 40
@@ -804,30 +805,28 @@ static void netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
804 if (likely(fdq == 0)) { 805 if (likely(fdq == 0)) {
805 unsigned int primary_buf_len; 806 unsigned int primary_buf_len;
806 /* Allocate a primary receive queue entry */ 807 /* Allocate a primary receive queue entry */
-		buf_len = netcp->rx_buffer_sizes[0] + NETCP_SOP_OFFSET;
+		buf_len = NETCP_PACKET_SIZE + NETCP_SOP_OFFSET;
808 primary_buf_len = SKB_DATA_ALIGN(buf_len) + 809 primary_buf_len = SKB_DATA_ALIGN(buf_len) +
809 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 810 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
810 811
-		if (primary_buf_len <= PAGE_SIZE) {
-			bufptr = netdev_alloc_frag(primary_buf_len);
-			pad[1] = primary_buf_len;
-		} else {
-			bufptr = kmalloc(primary_buf_len, GFP_ATOMIC |
-					 GFP_DMA32 | __GFP_COLD);
-			pad[1] = 0;
-		}
+		bufptr = netdev_alloc_frag(primary_buf_len);
+		pad[1] = primary_buf_len;
819 814
820 if (unlikely(!bufptr)) { 815 if (unlikely(!bufptr)) {
-			dev_warn_ratelimited(netcp->ndev_dev, "Primary RX buffer alloc failed\n");
+			dev_warn_ratelimited(netcp->ndev_dev,
+					     "Primary RX buffer alloc failed\n");
822 goto fail; 818 goto fail;
823 } 819 }
824 dma = dma_map_single(netcp->dev, bufptr, buf_len, 820 dma = dma_map_single(netcp->dev, bufptr, buf_len,
825 DMA_TO_DEVICE); 821 DMA_TO_DEVICE);
822 if (unlikely(dma_mapping_error(netcp->dev, dma)))
823 goto fail;
824
826 pad[0] = (u32)bufptr; 825 pad[0] = (u32)bufptr;
827 826
828 } else { 827 } else {
829 /* Allocate a secondary receive queue entry */ 828 /* Allocate a secondary receive queue entry */
-		page = alloc_page(GFP_ATOMIC | GFP_DMA32 | __GFP_COLD);
+		page = alloc_page(GFP_ATOMIC | GFP_DMA | __GFP_COLD);
831 if (unlikely(!page)) { 830 if (unlikely(!page)) {
832 dev_warn_ratelimited(netcp->ndev_dev, "Secondary page alloc failed\n"); 831 dev_warn_ratelimited(netcp->ndev_dev, "Secondary page alloc failed\n");
833 goto fail; 832 goto fail;
@@ -1010,7 +1009,7 @@ netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp)
1010 1009
1011 /* Map the linear buffer */ 1010 /* Map the linear buffer */
1012 dma_addr = dma_map_single(dev, skb->data, pkt_len, DMA_TO_DEVICE); 1011 dma_addr = dma_map_single(dev, skb->data, pkt_len, DMA_TO_DEVICE);
-	if (unlikely(!dma_addr)) {
+	if (unlikely(dma_mapping_error(dev, dma_addr))) {
1014 dev_err(netcp->ndev_dev, "Failed to map skb buffer\n"); 1013 dev_err(netcp->ndev_dev, "Failed to map skb buffer\n");
1015 return NULL; 1014 return NULL;
1016 } 1015 }
@@ -1546,8 +1545,8 @@ static int netcp_setup_navigator_resources(struct net_device *ndev)
1546 knav_queue_disable_notify(netcp->rx_queue); 1545 knav_queue_disable_notify(netcp->rx_queue);
1547 1546
1548 /* open Rx FDQs */ 1547 /* open Rx FDQs */
-	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN &&
-	     netcp->rx_queue_depths[i] && netcp->rx_buffer_sizes[i]; ++i) {
+	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_queue_depths[i];
+	     ++i) {
1551 snprintf(name, sizeof(name), "rx-fdq-%s-%d", ndev->name, i); 1550 snprintf(name, sizeof(name), "rx-fdq-%s-%d", ndev->name, i);
1552 netcp->rx_fdq[i] = knav_queue_open(name, KNAV_QUEUE_GP, 0); 1551 netcp->rx_fdq[i] = knav_queue_open(name, KNAV_QUEUE_GP, 0);
1553 if (IS_ERR_OR_NULL(netcp->rx_fdq[i])) { 1552 if (IS_ERR_OR_NULL(netcp->rx_fdq[i])) {
@@ -1617,11 +1616,11 @@ static int netcp_ndo_open(struct net_device *ndev)
1617 } 1616 }
1618 mutex_unlock(&netcp_modules_lock); 1617 mutex_unlock(&netcp_modules_lock);
1619 1618
1620 netcp_rxpool_refill(netcp);
1621 napi_enable(&netcp->rx_napi); 1619 napi_enable(&netcp->rx_napi);
1622 napi_enable(&netcp->tx_napi); 1620 napi_enable(&netcp->tx_napi);
1623 knav_queue_enable_notify(netcp->tx_compl_q); 1621 knav_queue_enable_notify(netcp->tx_compl_q);
1624 knav_queue_enable_notify(netcp->rx_queue); 1622 knav_queue_enable_notify(netcp->rx_queue);
1623 netcp_rxpool_refill(netcp);
1625 netif_tx_wake_all_queues(ndev); 1624 netif_tx_wake_all_queues(ndev);
1626 dev_dbg(netcp->ndev_dev, "netcp device %s opened\n", ndev->name); 1625 dev_dbg(netcp->ndev_dev, "netcp device %s opened\n", ndev->name);
1627 return 0; 1626 return 0;
@@ -1941,14 +1940,6 @@ static int netcp_create_interface(struct netcp_device *netcp_device,
1941 netcp->rx_queue_depths[0] = 128; 1940 netcp->rx_queue_depths[0] = 128;
1942 } 1941 }
1943 1942
1944 ret = of_property_read_u32_array(node_interface, "rx-buffer-size",
1945 netcp->rx_buffer_sizes,
1946 KNAV_DMA_FDQ_PER_CHAN);
1947 if (ret) {
1948 dev_err(dev, "missing \"rx-buffer-size\" parameter\n");
1949 netcp->rx_buffer_sizes[0] = 1536;
1950 }
1951
1952 ret = of_property_read_u32_array(node_interface, "rx-pool", temp, 2); 1943 ret = of_property_read_u32_array(node_interface, "rx-pool", temp, 2);
1953 if (ret < 0) { 1944 if (ret < 0) {
1954 dev_err(dev, "missing \"rx-pool\" parameter\n"); 1945 dev_err(dev, "missing \"rx-pool\" parameter\n");
@@ -2112,6 +2103,7 @@ probe_quit:
2112static int netcp_remove(struct platform_device *pdev) 2103static int netcp_remove(struct platform_device *pdev)
2113{ 2104{
2114 struct netcp_device *netcp_device = platform_get_drvdata(pdev); 2105 struct netcp_device *netcp_device = platform_get_drvdata(pdev);
2106 struct netcp_intf *netcp_intf, *netcp_tmp;
2115 struct netcp_inst_modpriv *inst_modpriv, *tmp; 2107 struct netcp_inst_modpriv *inst_modpriv, *tmp;
2116 struct netcp_module *module; 2108 struct netcp_module *module;
2117 2109
@@ -2123,10 +2115,17 @@ static int netcp_remove(struct platform_device *pdev)
2123 list_del(&inst_modpriv->inst_list); 2115 list_del(&inst_modpriv->inst_list);
2124 kfree(inst_modpriv); 2116 kfree(inst_modpriv);
2125 } 2117 }
-	WARN(!list_empty(&netcp_device->interface_head), "%s interface list not empty!\n",
-	     pdev->name);
 
-	devm_kfree(&pdev->dev, netcp_device);
+	/* now that all modules are removed, clean up the interfaces */
+	list_for_each_entry_safe(netcp_intf, netcp_tmp,
+				 &netcp_device->interface_head,
+				 interface_list) {
+		netcp_delete_interface(netcp_device, netcp_intf->ndev);
+	}
+
+	WARN(!list_empty(&netcp_device->interface_head),
+	     "%s interface list not empty!\n", pdev->name);
+
2130 pm_runtime_put_sync(&pdev->dev); 2129 pm_runtime_put_sync(&pdev->dev);
2131 pm_runtime_disable(&pdev->dev); 2130 pm_runtime_disable(&pdev->dev);
2132 platform_set_drvdata(pdev, NULL); 2131 platform_set_drvdata(pdev, NULL);
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index 9b7e0a34c98b..1974a8ae764a 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -1901,11 +1901,28 @@ static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
1901 writel(slave->mac_control, GBE_REG_ADDR(slave, emac_regs, mac_control)); 1901 writel(slave->mac_control, GBE_REG_ADDR(slave, emac_regs, mac_control));
1902} 1902}
1903 1903
1904static void gbe_sgmii_rtreset(struct gbe_priv *priv,
1905 struct gbe_slave *slave, bool set)
1906{
1907 void __iomem *sgmii_port_regs;
1908
1909 if (SLAVE_LINK_IS_XGMII(slave))
1910 return;
1911
1912 if ((priv->ss_version == GBE_SS_VERSION_14) && (slave->slave_num >= 2))
1913 sgmii_port_regs = priv->sgmii_port34_regs;
1914 else
1915 sgmii_port_regs = priv->sgmii_port_regs;
1916
1917 netcp_sgmii_rtreset(sgmii_port_regs, slave->slave_num, set);
1918}
1919
1904static void gbe_slave_stop(struct gbe_intf *intf) 1920static void gbe_slave_stop(struct gbe_intf *intf)
1905{ 1921{
1906 struct gbe_priv *gbe_dev = intf->gbe_dev; 1922 struct gbe_priv *gbe_dev = intf->gbe_dev;
1907 struct gbe_slave *slave = intf->slave; 1923 struct gbe_slave *slave = intf->slave;
1908 1924
1925 gbe_sgmii_rtreset(gbe_dev, slave, true);
1909 gbe_port_reset(slave); 1926 gbe_port_reset(slave);
1910 /* Disable forwarding */ 1927 /* Disable forwarding */
1911 cpsw_ale_control_set(gbe_dev->ale, slave->port_num, 1928 cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
@@ -1947,6 +1964,7 @@ static int gbe_slave_open(struct gbe_intf *gbe_intf)
1947 1964
1948 gbe_sgmii_config(priv, slave); 1965 gbe_sgmii_config(priv, slave);
1949 gbe_port_reset(slave); 1966 gbe_port_reset(slave);
1967 gbe_sgmii_rtreset(priv, slave, false);
1950 gbe_port_config(priv, slave, priv->rx_packet_max); 1968 gbe_port_config(priv, slave, priv->rx_packet_max);
1951 gbe_set_slave_mac(slave, gbe_intf); 1969 gbe_set_slave_mac(slave, gbe_intf);
1952 /* enable forwarding */ 1970 /* enable forwarding */
@@ -2490,10 +2508,9 @@ static void free_secondary_ports(struct gbe_priv *gbe_dev)
2490{ 2508{
2491 struct gbe_slave *slave; 2509 struct gbe_slave *slave;
2492 2510
-	for (;;) {
+	while (!list_empty(&gbe_dev->secondary_slaves)) {
 		slave = first_sec_slave(gbe_dev);
-		if (!slave)
-			break;
+
2497 if (slave->phy) 2514 if (slave->phy)
2498 phy_disconnect(slave->phy); 2515 phy_disconnect(slave->phy);
2499 list_del(&slave->slave_list); 2516 list_del(&slave->slave_list);
@@ -2839,14 +2856,13 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
2839 &gbe_dev->dma_chan_name); 2856 &gbe_dev->dma_chan_name);
2840 if (ret < 0) { 2857 if (ret < 0) {
2841 dev_err(dev, "missing \"tx-channel\" parameter\n"); 2858 dev_err(dev, "missing \"tx-channel\" parameter\n");
-		ret = -ENODEV;
-		goto quit;
+		return -EINVAL;
2844 } 2860 }
2845 2861
2846 if (!strcmp(node->name, "gbe")) { 2862 if (!strcmp(node->name, "gbe")) {
2847 ret = get_gbe_resource_version(gbe_dev, node); 2863 ret = get_gbe_resource_version(gbe_dev, node);
2848 if (ret) 2864 if (ret)
-			goto quit;
+			return ret;
2850 2866
2851 dev_dbg(dev, "ss_version: 0x%08x\n", gbe_dev->ss_version); 2867 dev_dbg(dev, "ss_version: 0x%08x\n", gbe_dev->ss_version);
2852 2868
@@ -2857,22 +2873,20 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
2857 else 2873 else
2858 ret = -ENODEV; 2874 ret = -ENODEV;
2859 2875
2860 if (ret)
2861 goto quit;
2862 } else if (!strcmp(node->name, "xgbe")) { 2876 } else if (!strcmp(node->name, "xgbe")) {
2863 ret = set_xgbe_ethss10_priv(gbe_dev, node); 2877 ret = set_xgbe_ethss10_priv(gbe_dev, node);
2864 if (ret) 2878 if (ret)
-			goto quit;
+			return ret;
2866 ret = netcp_xgbe_serdes_init(gbe_dev->xgbe_serdes_regs, 2880 ret = netcp_xgbe_serdes_init(gbe_dev->xgbe_serdes_regs,
2867 gbe_dev->ss_regs); 2881 gbe_dev->ss_regs);
2868 if (ret)
2869 goto quit;
2870 } else { 2882 } else {
2871 dev_err(dev, "unknown GBE node(%s)\n", node->name); 2883 dev_err(dev, "unknown GBE node(%s)\n", node->name);
2872 ret = -ENODEV; 2884 ret = -ENODEV;
2873 goto quit;
2874 } 2885 }
2875 2886
2887 if (ret)
2888 return ret;
2889
2876 interfaces = of_get_child_by_name(node, "interfaces"); 2890 interfaces = of_get_child_by_name(node, "interfaces");
2877 if (!interfaces) 2891 if (!interfaces)
2878 dev_err(dev, "could not find interfaces\n"); 2892 dev_err(dev, "could not find interfaces\n");
@@ -2880,11 +2894,11 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
2880 ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device, 2894 ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device,
2881 gbe_dev->dma_chan_name, gbe_dev->tx_queue_id); 2895 gbe_dev->dma_chan_name, gbe_dev->tx_queue_id);
2882 if (ret) 2896 if (ret)
-		goto quit;
+		return ret;
2884 2898
2885 ret = netcp_txpipe_open(&gbe_dev->tx_pipe); 2899 ret = netcp_txpipe_open(&gbe_dev->tx_pipe);
2886 if (ret) 2900 if (ret)
-		goto quit;
+		return ret;
2888 2902
2889 /* Create network interfaces */ 2903 /* Create network interfaces */
2890 INIT_LIST_HEAD(&gbe_dev->gbe_intf_head); 2904 INIT_LIST_HEAD(&gbe_dev->gbe_intf_head);
@@ -2899,6 +2913,7 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
2899 if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) 2913 if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves)
2900 break; 2914 break;
2901 } 2915 }
2916 of_node_put(interfaces);
2902 2917
2903 if (!gbe_dev->num_slaves) 2918 if (!gbe_dev->num_slaves)
2904 dev_warn(dev, "No network interface configured\n"); 2919 dev_warn(dev, "No network interface configured\n");
@@ -2911,9 +2926,10 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
2911 of_node_put(secondary_ports); 2926 of_node_put(secondary_ports);
2912 2927
2913 if (!gbe_dev->num_slaves) { 2928 if (!gbe_dev->num_slaves) {
-		dev_err(dev, "No network interface or secondary ports configured\n");
+		dev_err(dev,
+			"No network interface or secondary ports configured\n");
2915 ret = -ENODEV; 2931 ret = -ENODEV;
-		goto quit;
+		goto free_sec_ports;
2917 } 2933 }
2918 2934
2919 memset(&ale_params, 0, sizeof(ale_params)); 2935 memset(&ale_params, 0, sizeof(ale_params));
@@ -2927,7 +2943,7 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
2927 if (!gbe_dev->ale) { 2943 if (!gbe_dev->ale) {
2928 dev_err(gbe_dev->dev, "error initializing ale engine\n"); 2944 dev_err(gbe_dev->dev, "error initializing ale engine\n");
2929 ret = -ENODEV; 2945 ret = -ENODEV;
-		goto quit;
+		goto free_sec_ports;
2931 } else { 2947 } else {
2932 dev_dbg(gbe_dev->dev, "Created a gbe ale engine\n"); 2948 dev_dbg(gbe_dev->dev, "Created a gbe ale engine\n");
2933 } 2949 }
@@ -2943,14 +2959,8 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
2943 *inst_priv = gbe_dev; 2959 *inst_priv = gbe_dev;
2944 return 0; 2960 return 0;
2945 2961
-quit:
-	if (gbe_dev->hw_stats)
-		devm_kfree(dev, gbe_dev->hw_stats);
-	cpsw_ale_destroy(gbe_dev->ale);
-	if (gbe_dev->ss_regs)
-		devm_iounmap(dev, gbe_dev->ss_regs);
-	of_node_put(interfaces);
-	devm_kfree(dev, gbe_dev);
+free_sec_ports:
+	free_secondary_ports(gbe_dev);
2954 return ret; 2964 return ret;
2955} 2965}
2956 2966
@@ -3023,12 +3033,9 @@ static int gbe_remove(struct netcp_device *netcp_device, void *inst_priv)
3023 free_secondary_ports(gbe_dev); 3033 free_secondary_ports(gbe_dev);
3024 3034
3025 if (!list_empty(&gbe_dev->gbe_intf_head)) 3035 if (!list_empty(&gbe_dev->gbe_intf_head))
3026 dev_alert(gbe_dev->dev, "unreleased ethss interfaces present\n"); 3036 dev_alert(gbe_dev->dev,
3037 "unreleased ethss interfaces present\n");
3027 3038
3028 devm_kfree(gbe_dev->dev, gbe_dev->hw_stats);
3029 devm_iounmap(gbe_dev->dev, gbe_dev->ss_regs);
3030 memset(gbe_dev, 0x00, sizeof(*gbe_dev));
3031 devm_kfree(gbe_dev->dev, gbe_dev);
3032 return 0; 3039 return 0;
3033} 3040}
3034 3041
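
The gbe_probe()/gbe_remove() rework above leans on devm-managed resources: memory from devm_kzalloc() and mappings from devm_ioremap_resource() are released automatically when probe fails or the device goes away, so the old "quit" label that called devm_kfree()/devm_iounmap() by hand is unnecessary. Only references devm does not track (the node from of_get_child_by_name(), the secondary-port list) still need explicit cleanup. A minimal sketch of that pattern, with hypothetical names (my_priv, my_probe) that are not taken from this driver:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct my_priv {
	void __iomem *regs;
};

static int my_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *child;
	struct my_priv *priv;
	struct resource *res;

	/* devm allocation: freed automatically if we return an error */
	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(priv->regs))
		return PTR_ERR(priv->regs);	/* no devm_iounmap() needed */

	/* of_get_child_by_name() takes a reference devm knows nothing about */
	child = of_get_child_by_name(dev->of_node, "interfaces");
	if (child) {
		/* ... walk the children ... */
		of_node_put(child);	/* drop it as soon as we are done */
	}

	platform_set_drvdata(pdev, priv);
	return 0;
}
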
diff --git a/drivers/net/ethernet/ti/netcp_sgmii.c b/drivers/net/ethernet/ti/netcp_sgmii.c
index dbeb14266e2f..5d8419f658d0 100644
--- a/drivers/net/ethernet/ti/netcp_sgmii.c
+++ b/drivers/net/ethernet/ti/netcp_sgmii.c
@@ -18,6 +18,9 @@
18 18
19#include "netcp.h" 19#include "netcp.h"
20 20
21#define SGMII_SRESET_RESET BIT(0)
22#define SGMII_SRESET_RTRESET BIT(1)
23
21#define SGMII_REG_STATUS_LOCK BIT(4) 24#define SGMII_REG_STATUS_LOCK BIT(4)
22#define SGMII_REG_STATUS_LINK BIT(0) 25#define SGMII_REG_STATUS_LINK BIT(0)
23#define SGMII_REG_STATUS_AUTONEG BIT(2) 26#define SGMII_REG_STATUS_AUTONEG BIT(2)
@@ -51,12 +54,35 @@ static void sgmii_write_reg_bit(void __iomem *base, int reg, u32 val)
51int netcp_sgmii_reset(void __iomem *sgmii_ofs, int port) 54int netcp_sgmii_reset(void __iomem *sgmii_ofs, int port)
52{ 55{
53 /* Soft reset */ 56 /* Soft reset */
54 sgmii_write_reg_bit(sgmii_ofs, SGMII_SRESET_REG(port), 0x1); 57 sgmii_write_reg_bit(sgmii_ofs, SGMII_SRESET_REG(port),
55 while (sgmii_read_reg(sgmii_ofs, SGMII_SRESET_REG(port)) != 0x0) 58 SGMII_SRESET_RESET);
59
60 while ((sgmii_read_reg(sgmii_ofs, SGMII_SRESET_REG(port)) &
61 SGMII_SRESET_RESET) != 0x0)
56 ; 62 ;
63
57 return 0; 64 return 0;
58} 65}
59 66
67/* port is 0 based */
68bool netcp_sgmii_rtreset(void __iomem *sgmii_ofs, int port, bool set)
69{
70 u32 reg;
71 bool oldval;
72
73 /* Initiate a soft reset */
74 reg = sgmii_read_reg(sgmii_ofs, SGMII_SRESET_REG(port));
75 oldval = (reg & SGMII_SRESET_RTRESET) != 0x0;
76 if (set)
77 reg |= SGMII_SRESET_RTRESET;
78 else
79 reg &= ~SGMII_SRESET_RTRESET;
80 sgmii_write_reg(sgmii_ofs, SGMII_SRESET_REG(port), reg);
81 wmb();
82
83 return oldval;
84}
85
60int netcp_sgmii_get_port_link(void __iomem *sgmii_ofs, int port) 86int netcp_sgmii_get_port_link(void __iomem *sgmii_ofs, int port)
61{ 87{
62 u32 status = 0, link = 0; 88 u32 status = 0, link = 0;
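
netcp_sgmii_rtreset() above is a plain read-modify-write of a single register bit that also reports the previous state, with wmb() making sure the write is ordered before whatever the caller does next. A generic sketch of the same shape using readl()/writel() on a hypothetical MMIO register (the MY_RTRESET bit is made up for illustration; it mirrors SGMII_SRESET_RTRESET):

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/types.h>

#define MY_RTRESET	BIT(1)	/* hypothetical bit */

/* Set or clear the bit, return whether it was set before. */
static bool my_rtreset(void __iomem *reg, bool set)
{
	u32 val = readl(reg);
	bool old = val & MY_RTRESET;

	if (set)
		val |= MY_RTRESET;
	else
		val &= ~MY_RTRESET;
	writel(val, reg);
	wmb();	/* order the register write before later accesses */

	return old;
}
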
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 4208dd7ef101..d95f9aae95e7 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -1530,9 +1530,9 @@ static int axienet_probe(struct platform_device *pdev)
1530 /* Map device registers */ 1530 /* Map device registers */
1531 ethres = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1531 ethres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1532 lp->regs = devm_ioremap_resource(&pdev->dev, ethres); 1532 lp->regs = devm_ioremap_resource(&pdev->dev, ethres);
1533 if (!lp->regs) { 1533 if (IS_ERR(lp->regs)) {
1534 dev_err(&pdev->dev, "could not map Axi Ethernet regs.\n"); 1534 dev_err(&pdev->dev, "could not map Axi Ethernet regs.\n");
1535 ret = -ENOMEM; 1535 ret = PTR_ERR(lp->regs);
1536 goto free_netdev; 1536 goto free_netdev;
1537 } 1537 }
1538 1538
@@ -1599,9 +1599,9 @@ static int axienet_probe(struct platform_device *pdev)
1599 goto free_netdev; 1599 goto free_netdev;
1600 } 1600 }
1601 lp->dma_regs = devm_ioremap_resource(&pdev->dev, &dmares); 1601 lp->dma_regs = devm_ioremap_resource(&pdev->dev, &dmares);
1602 if (!lp->dma_regs) { 1602 if (IS_ERR(lp->dma_regs)) {
1603 dev_err(&pdev->dev, "could not map DMA regs\n"); 1603 dev_err(&pdev->dev, "could not map DMA regs\n");
1604 ret = -ENOMEM; 1604 ret = PTR_ERR(lp->dma_regs);
1605 goto free_netdev; 1605 goto free_netdev;
1606 } 1606 }
1607 lp->rx_irq = irq_of_parse_and_map(np, 1); 1607 lp->rx_irq = irq_of_parse_and_map(np, 1);
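
The axienet fix above corrects a common mistake: devm_ioremap_resource() reports failure through ERR_PTR()-encoded pointers, never NULL, so the check must use IS_ERR() and the error should be propagated with PTR_ERR(). A minimal sketch (my_map_regs() is an illustrative helper, not taken from this driver):

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static void __iomem *my_map_regs(struct platform_device *pdev, int index,
				 int *ret)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, index);
	void __iomem *base = devm_ioremap_resource(&pdev->dev, res);

	if (IS_ERR(base)) {		/* never compare against NULL here */
		*ret = PTR_ERR(base);	/* e.g. -EBUSY or -EINVAL */
		return NULL;
	}

	*ret = 0;
	return base;
}
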
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index 7856b6ccf5c5..d95a50ae996d 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -482,6 +482,7 @@ static void bpq_setup(struct net_device *dev)
482 memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN); 482 memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN);
483 483
484 dev->flags = 0; 484 dev->flags = 0;
485 dev->features = NETIF_F_LLTX; /* Allow recursion */
485 486
486#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE) 487#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
487 dev->header_ops = &ax25_header_ops; 488 dev->header_ops = &ax25_header_ops;
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 2ffbf13471d0..216bfd350169 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -728,11 +728,12 @@ static int mkiss_open(struct tty_struct *tty)
728 dev->type = ARPHRD_AX25; 728 dev->type = ARPHRD_AX25;
729 729
730 /* Perform the low-level AX25 initialization. */ 730 /* Perform the low-level AX25 initialization. */
731 if ((err = ax_open(ax->dev))) { 731 err = ax_open(ax->dev);
732 if (err)
732 goto out_free_netdev; 733 goto out_free_netdev;
733 }
734 734
735 if (register_netdev(dev)) 735 err = register_netdev(dev);
736 if (err)
736 goto out_free_buffers; 737 goto out_free_buffers;
737 738
738 /* after register_netdev() - because else printk smashes the kernel */ 739 /* after register_netdev() - because else printk smashes the kernel */
diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h
index 953a97492fab..9542b7bac61a 100644
--- a/drivers/net/ipvlan/ipvlan.h
+++ b/drivers/net/ipvlan/ipvlan.h
@@ -67,8 +67,6 @@ struct ipvl_dev {
67 struct ipvl_port *port; 67 struct ipvl_port *port;
68 struct net_device *phy_dev; 68 struct net_device *phy_dev;
69 struct list_head addrs; 69 struct list_head addrs;
70 int ipv4cnt;
71 int ipv6cnt;
72 struct ipvl_pcpu_stats __percpu *pcpu_stats; 70 struct ipvl_pcpu_stats __percpu *pcpu_stats;
73 DECLARE_BITMAP(mac_filters, IPVLAN_MAC_FILTER_SIZE); 71 DECLARE_BITMAP(mac_filters, IPVLAN_MAC_FILTER_SIZE);
74 netdev_features_t sfeatures; 72 netdev_features_t sfeatures;
@@ -106,6 +104,11 @@ static inline struct ipvl_port *ipvlan_port_get_rcu(const struct net_device *d)
106 return rcu_dereference(d->rx_handler_data); 104 return rcu_dereference(d->rx_handler_data);
107} 105}
108 106
107static inline struct ipvl_port *ipvlan_port_get_rcu_bh(const struct net_device *d)
108{
109 return rcu_dereference_bh(d->rx_handler_data);
110}
111
109static inline struct ipvl_port *ipvlan_port_get_rtnl(const struct net_device *d) 112static inline struct ipvl_port *ipvlan_port_get_rtnl(const struct net_device *d)
110{ 113{
111 return rtnl_dereference(d->rx_handler_data); 114 return rtnl_dereference(d->rx_handler_data);
@@ -124,5 +127,5 @@ struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
124bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6); 127bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6);
125struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port, 128struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port,
126 const void *iaddr, bool is_v6); 129 const void *iaddr, bool is_v6);
127void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync); 130void ipvlan_ht_addr_del(struct ipvl_addr *addr);
128#endif /* __IPVLAN_H */ 131#endif /* __IPVLAN_H */
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index 8afbedad620d..207f62e8de9a 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -85,11 +85,9 @@ void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr)
85 hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]); 85 hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]);
86} 86}
87 87
88void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync) 88void ipvlan_ht_addr_del(struct ipvl_addr *addr)
89{ 89{
90 hlist_del_init_rcu(&addr->hlnode); 90 hlist_del_init_rcu(&addr->hlnode);
91 if (sync)
92 synchronize_rcu();
93} 91}
94 92
95struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan, 93struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
@@ -531,7 +529,7 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
531int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev) 529int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
532{ 530{
533 struct ipvl_dev *ipvlan = netdev_priv(dev); 531 struct ipvl_dev *ipvlan = netdev_priv(dev);
534 struct ipvl_port *port = ipvlan_port_get_rcu(ipvlan->phy_dev); 532 struct ipvl_port *port = ipvlan_port_get_rcu_bh(ipvlan->phy_dev);
535 533
536 if (!port) 534 if (!port)
537 goto out; 535 goto out;
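
ipvlan_queue_xmit() runs on the transmit path, where RCU protection comes from having bottom halves disabled rather than from an explicit rcu_read_lock(), so plain rcu_dereference() would trip lockdep there; hence the new rcu_dereference_bh() accessor. A small sketch of the matching reader pattern (struct my_port and my_reader() are illustrative names):

#include <linux/netdevice.h>
#include <linux/rcupdate.h>

struct my_port;	/* opaque per-device state hung off rx_handler_data */

static struct my_port *my_port_get_bh(const struct net_device *dev)
{
	/* legal when called with BHs disabled, e.g. from ndo_start_xmit */
	return rcu_dereference_bh(dev->rx_handler_data);
}

static void my_reader(struct net_device *dev)
{
	struct my_port *port;

	rcu_read_lock_bh();		/* explicit BH-flavoured read side */
	port = my_port_get_bh(dev);
	if (port) {
		/* ... use port ... */
	}
	rcu_read_unlock_bh();
}
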
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 1acc283160d9..20b58bdecf75 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -153,10 +153,9 @@ static int ipvlan_open(struct net_device *dev)
153 else 153 else
154 dev->flags &= ~IFF_NOARP; 154 dev->flags &= ~IFF_NOARP;
155 155
156 if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) { 156 list_for_each_entry(addr, &ipvlan->addrs, anode)
157 list_for_each_entry(addr, &ipvlan->addrs, anode) 157 ipvlan_ht_addr_add(ipvlan, addr);
158 ipvlan_ht_addr_add(ipvlan, addr); 158
159 }
160 return dev_uc_add(phy_dev, phy_dev->dev_addr); 159 return dev_uc_add(phy_dev, phy_dev->dev_addr);
161} 160}
162 161
@@ -171,10 +170,9 @@ static int ipvlan_stop(struct net_device *dev)
171 170
172 dev_uc_del(phy_dev, phy_dev->dev_addr); 171 dev_uc_del(phy_dev, phy_dev->dev_addr);
173 172
174 if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) { 173 list_for_each_entry(addr, &ipvlan->addrs, anode)
175 list_for_each_entry(addr, &ipvlan->addrs, anode) 174 ipvlan_ht_addr_del(addr);
176 ipvlan_ht_addr_del(addr, !dev->dismantle); 175
177 }
178 return 0; 176 return 0;
179} 177}
180 178
@@ -471,8 +469,6 @@ static int ipvlan_link_new(struct net *src_net, struct net_device *dev,
471 ipvlan->port = port; 469 ipvlan->port = port;
472 ipvlan->sfeatures = IPVLAN_FEATURES; 470 ipvlan->sfeatures = IPVLAN_FEATURES;
473 INIT_LIST_HEAD(&ipvlan->addrs); 471 INIT_LIST_HEAD(&ipvlan->addrs);
474 ipvlan->ipv4cnt = 0;
475 ipvlan->ipv6cnt = 0;
476 472
477 /* TODO Probably put random address here to be presented to the 473 /* TODO Probably put random address here to be presented to the
478 * world but keep using the physical-dev address for the outgoing 474 * world but keep using the physical-dev address for the outgoing
@@ -508,12 +504,12 @@ static void ipvlan_link_delete(struct net_device *dev, struct list_head *head)
508 struct ipvl_dev *ipvlan = netdev_priv(dev); 504 struct ipvl_dev *ipvlan = netdev_priv(dev);
509 struct ipvl_addr *addr, *next; 505 struct ipvl_addr *addr, *next;
510 506
511 if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) { 507 list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) {
512 list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) { 508 ipvlan_ht_addr_del(addr);
513 ipvlan_ht_addr_del(addr, !dev->dismantle); 509 list_del(&addr->anode);
514 list_del(&addr->anode); 510 kfree_rcu(addr, rcu);
515 }
516 } 511 }
512
517 list_del_rcu(&ipvlan->pnode); 513 list_del_rcu(&ipvlan->pnode);
518 unregister_netdevice_queue(dev, head); 514 unregister_netdevice_queue(dev, head);
519 netdev_upper_dev_unlink(ipvlan->phy_dev, dev); 515 netdev_upper_dev_unlink(ipvlan->phy_dev, dev);
@@ -627,7 +623,7 @@ static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
627 memcpy(&addr->ip6addr, ip6_addr, sizeof(struct in6_addr)); 623 memcpy(&addr->ip6addr, ip6_addr, sizeof(struct in6_addr));
628 addr->atype = IPVL_IPV6; 624 addr->atype = IPVL_IPV6;
629 list_add_tail(&addr->anode, &ipvlan->addrs); 625 list_add_tail(&addr->anode, &ipvlan->addrs);
630 ipvlan->ipv6cnt++; 626
631 /* If the interface is not up, the address will be added to the hash 627 /* If the interface is not up, the address will be added to the hash
632 * list by ipvlan_open. 628 * list by ipvlan_open.
633 */ 629 */
@@ -645,10 +641,8 @@ static void ipvlan_del_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
645 if (!addr) 641 if (!addr)
646 return; 642 return;
647 643
648 ipvlan_ht_addr_del(addr, true); 644 ipvlan_ht_addr_del(addr);
649 list_del(&addr->anode); 645 list_del(&addr->anode);
650 ipvlan->ipv6cnt--;
651 WARN_ON(ipvlan->ipv6cnt < 0);
652 kfree_rcu(addr, rcu); 646 kfree_rcu(addr, rcu);
653 647
654 return; 648 return;
@@ -661,6 +655,10 @@ static int ipvlan_addr6_event(struct notifier_block *unused,
661 struct net_device *dev = (struct net_device *)if6->idev->dev; 655 struct net_device *dev = (struct net_device *)if6->idev->dev;
662 struct ipvl_dev *ipvlan = netdev_priv(dev); 656 struct ipvl_dev *ipvlan = netdev_priv(dev);
663 657
658 /* FIXME IPv6 autoconf calls us from bh without RTNL */
659 if (in_softirq())
660 return NOTIFY_DONE;
661
664 if (!netif_is_ipvlan(dev)) 662 if (!netif_is_ipvlan(dev))
665 return NOTIFY_DONE; 663 return NOTIFY_DONE;
666 664
@@ -699,7 +697,7 @@ static int ipvlan_add_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
699 memcpy(&addr->ip4addr, ip4_addr, sizeof(struct in_addr)); 697 memcpy(&addr->ip4addr, ip4_addr, sizeof(struct in_addr));
700 addr->atype = IPVL_IPV4; 698 addr->atype = IPVL_IPV4;
701 list_add_tail(&addr->anode, &ipvlan->addrs); 699 list_add_tail(&addr->anode, &ipvlan->addrs);
702 ipvlan->ipv4cnt++; 700
703 /* If the interface is not up, the address will be added to the hash 701 /* If the interface is not up, the address will be added to the hash
704 * list by ipvlan_open. 702 * list by ipvlan_open.
705 */ 703 */
@@ -717,10 +715,8 @@ static void ipvlan_del_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
717 if (!addr) 715 if (!addr)
718 return; 716 return;
719 717
720 ipvlan_ht_addr_del(addr, true); 718 ipvlan_ht_addr_del(addr);
721 list_del(&addr->anode); 719 list_del(&addr->anode);
722 ipvlan->ipv4cnt--;
723 WARN_ON(ipvlan->ipv4cnt < 0);
724 kfree_rcu(addr, rcu); 720 kfree_rcu(addr, rcu);
725 721
726 return; 722 return;
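
The ipvlan_main changes drop synchronize_rcu() from the address-removal path and rely on kfree_rcu() instead: the object is unpublished with hlist_del_init_rcu()/list_del(), and its memory is reclaimed only after a grace period, without blocking the caller. A sketch of that deferred-free pattern, assuming a hypothetical struct my_addr that embeds a struct rcu_head named rcu (as struct ipvl_addr does):

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_addr {
	struct hlist_node hlnode;	/* RCU-protected hash linkage */
	struct list_head  anode;	/* per-device list linkage */
	struct rcu_head   rcu;
};

static void my_addr_del(struct my_addr *addr)
{
	/* unpublish: concurrent readers still see a consistent list */
	hlist_del_init_rcu(&addr->hlnode);
	list_del(&addr->anode);

	/* free after a grace period instead of blocking in synchronize_rcu() */
	kfree_rcu(addr, rcu);
}
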
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index f8370808a018..edd77342773a 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -719,6 +719,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
719 struct virtio_net_hdr vnet_hdr = { 0 }; 719 struct virtio_net_hdr vnet_hdr = { 0 };
720 int vnet_hdr_len = 0; 720 int vnet_hdr_len = 0;
721 int copylen = 0; 721 int copylen = 0;
722 int depth;
722 bool zerocopy = false; 723 bool zerocopy = false;
723 size_t linear; 724 size_t linear;
724 ssize_t n; 725 ssize_t n;
@@ -804,6 +805,12 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
804 805
805 skb_probe_transport_header(skb, ETH_HLEN); 806 skb_probe_transport_header(skb, ETH_HLEN);
806 807
808 /* Move network header to the right position for VLAN tagged packets */
809 if ((skb->protocol == htons(ETH_P_8021Q) ||
810 skb->protocol == htons(ETH_P_8021AD)) &&
811 __vlan_get_protocol(skb, skb->protocol, &depth) != 0)
812 skb_set_network_header(skb, depth);
813
807 rcu_read_lock(); 814 rcu_read_lock();
808 vlan = rcu_dereference(q->vlan); 815 vlan = rcu_dereference(q->vlan);
809 /* copy skb_ubuf_info for callback when skb has no error */ 816 /* copy skb_ubuf_info for callback when skb has no error */
@@ -1355,6 +1362,7 @@ static void macvtap_exit(void)
1355 class_unregister(macvtap_class); 1362 class_unregister(macvtap_class);
1356 cdev_del(&macvtap_cdev); 1363 cdev_del(&macvtap_cdev);
1357 unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS); 1364 unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);
1365 idr_destroy(&minor_idr);
1358} 1366}
1359module_exit(macvtap_exit); 1367module_exit(macvtap_exit);
1360 1368
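
For 802.1Q/802.1AD frames handed in from userspace, the network header would otherwise still point at the VLAN tag; the macvtap hunk above re-derives the real offset with __vlan_get_protocol() and moves the header with skb_set_network_header(). A hedged sketch of that adjustment (my_fix_network_header() is an illustrative wrapper, not driver code):

#include <linux/if_vlan.h>
#include <linux/skbuff.h>

static void my_fix_network_header(struct sk_buff *skb)
{
	int depth;

	/* __vlan_get_protocol() walks the (possibly stacked) VLAN headers
	 * and reports how many bytes they occupy via @depth.
	 */
	if ((skb->protocol == htons(ETH_P_8021Q) ||
	     skb->protocol == htons(ETH_P_8021AD)) &&
	    __vlan_get_protocol(skb, skb->protocol, &depth) != 0)
		skb_set_network_header(skb, depth);
}
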
diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
index 3cc316cb7e6b..d8757bf9ad75 100644
--- a/drivers/net/ntb_netdev.c
+++ b/drivers/net/ntb_netdev.c
@@ -102,6 +102,12 @@ static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
102 102
103 netdev_dbg(ndev, "%s: %d byte payload received\n", __func__, len); 103 netdev_dbg(ndev, "%s: %d byte payload received\n", __func__, len);
104 104
105 if (len < 0) {
106 ndev->stats.rx_errors++;
107 ndev->stats.rx_length_errors++;
108 goto enqueue_again;
109 }
110
105 skb_put(skb, len); 111 skb_put(skb, len);
106 skb->protocol = eth_type_trans(skb, ndev); 112 skb->protocol = eth_type_trans(skb, ndev);
107 skb->ip_summed = CHECKSUM_NONE; 113 skb->ip_summed = CHECKSUM_NONE;
@@ -121,6 +127,7 @@ static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
121 return; 127 return;
122 } 128 }
123 129
130enqueue_again:
124 rc = ntb_transport_rx_enqueue(qp, skb, skb->data, ndev->mtu + ETH_HLEN); 131 rc = ntb_transport_rx_enqueue(qp, skb, skb->data, ndev->mtu + ETH_HLEN);
125 if (rc) { 132 if (rc) {
126 dev_kfree_skb(skb); 133 dev_kfree_skb(skb);
@@ -184,7 +191,7 @@ static int ntb_netdev_open(struct net_device *ndev)
184 191
185 rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data, 192 rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data,
186 ndev->mtu + ETH_HLEN); 193 ndev->mtu + ETH_HLEN);
187 if (rc == -EINVAL) { 194 if (rc) {
188 dev_kfree_skb(skb); 195 dev_kfree_skb(skb);
189 goto err; 196 goto err;
190 } 197 }
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index cf18940f4e84..cb86d7a01542 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -191,7 +191,7 @@ config MDIO_BUS_MUX_GPIO
191 191
192config MDIO_BUS_MUX_MMIOREG 192config MDIO_BUS_MUX_MMIOREG
193 tristate "Support for MMIO device-controlled MDIO bus multiplexers" 193 tristate "Support for MMIO device-controlled MDIO bus multiplexers"
194 depends on OF_MDIO 194 depends on OF_MDIO && HAS_IOMEM
195 select MDIO_BUS_MUX 195 select MDIO_BUS_MUX
196 help 196 help
197 This module provides a driver for MDIO bus multiplexers that 197 This module provides a driver for MDIO bus multiplexers that
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index c7a12e2e07b7..8a3bf5469892 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -164,7 +164,7 @@ static int dp83867_config_init(struct phy_device *phydev)
164 return ret; 164 return ret;
165 } 165 }
166 166
167 if ((phydev->interface >= PHY_INTERFACE_MODE_RGMII_ID) || 167 if ((phydev->interface >= PHY_INTERFACE_MODE_RGMII_ID) &&
168 (phydev->interface <= PHY_INTERFACE_MODE_RGMII_RXID)) { 168 (phydev->interface <= PHY_INTERFACE_MODE_RGMII_RXID)) {
169 val = phy_read_mmd_indirect(phydev, DP83867_RGMIICTL, 169 val = phy_read_mmd_indirect(phydev, DP83867_RGMIICTL,
170 DP83867_DEVADDR, phydev->addr); 170 DP83867_DEVADDR, phydev->addr);
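
The dp83867 one-liner is a classic range-check bug: with `||`, the test `interface >= PHY_INTERFACE_MODE_RGMII_ID || interface <= PHY_INTERFACE_MODE_RGMII_RXID` is true for every interface value (anything below the lower bound satisfies the second half, anything above satisfies the first), so the RGMII delay registers were touched even in non-RGMII modes. An inclusive range test needs `&&`, as in this small illustration (is_in_range() is a made-up helper):

#include <stdbool.h>

/* True only when lo <= x <= hi; with '||' this would be true for all x. */
static bool is_in_range(int x, int lo, int hi)
{
	return x >= lo && x <= hi;
}
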
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 095ef3fe369a..46a14cbb0215 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -421,6 +421,8 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv)
421{ 421{
422 struct phy_device *phydev = to_phy_device(dev); 422 struct phy_device *phydev = to_phy_device(dev);
423 struct phy_driver *phydrv = to_phy_driver(drv); 423 struct phy_driver *phydrv = to_phy_driver(drv);
424 const int num_ids = ARRAY_SIZE(phydev->c45_ids.device_ids);
425 int i;
424 426
425 if (of_driver_match_device(dev, drv)) 427 if (of_driver_match_device(dev, drv))
426 return 1; 428 return 1;
@@ -428,8 +430,21 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv)
428 if (phydrv->match_phy_device) 430 if (phydrv->match_phy_device)
429 return phydrv->match_phy_device(phydev); 431 return phydrv->match_phy_device(phydev);
430 432
431 return (phydrv->phy_id & phydrv->phy_id_mask) == 433 if (phydev->is_c45) {
432 (phydev->phy_id & phydrv->phy_id_mask); 434 for (i = 1; i < num_ids; i++) {
435 if (!(phydev->c45_ids.devices_in_package & (1 << i)))
436 continue;
437
438 if ((phydrv->phy_id & phydrv->phy_id_mask) ==
439 (phydev->c45_ids.device_ids[i] &
440 phydrv->phy_id_mask))
441 return 1;
442 }
443 return 0;
444 } else {
445 return (phydrv->phy_id & phydrv->phy_id_mask) ==
446 (phydev->phy_id & phydrv->phy_id_mask);
447 }
433} 448}
434 449
435#ifdef CONFIG_PM 450#ifdef CONFIG_PM
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index b2197b506acb..1e1fbb049ec6 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -811,6 +811,7 @@ void phy_state_machine(struct work_struct *work)
811 bool needs_aneg = false, do_suspend = false; 811 bool needs_aneg = false, do_suspend = false;
812 enum phy_state old_state; 812 enum phy_state old_state;
813 int err = 0; 813 int err = 0;
814 int old_link;
814 815
815 mutex_lock(&phydev->lock); 816 mutex_lock(&phydev->lock);
816 817
@@ -896,11 +897,18 @@ void phy_state_machine(struct work_struct *work)
896 phydev->adjust_link(phydev->attached_dev); 897 phydev->adjust_link(phydev->attached_dev);
897 break; 898 break;
898 case PHY_RUNNING: 899 case PHY_RUNNING:
899 /* Only register a CHANGE if we are 900 /* Only register a CHANGE if we are polling or ignoring
900 * polling or ignoring interrupts 901 * interrupts and link changed since latest checking.
901 */ 902 */
902 if (!phy_interrupt_is_valid(phydev)) 903 if (!phy_interrupt_is_valid(phydev)) {
903 phydev->state = PHY_CHANGELINK; 904 old_link = phydev->link;
905 err = phy_read_status(phydev);
906 if (err)
907 break;
908
909 if (old_link != phydev->link)
910 phydev->state = PHY_CHANGELINK;
911 }
904 break; 912 break;
905 case PHY_CHANGELINK: 913 case PHY_CHANGELINK:
906 err = phy_read_status(phydev); 914 err = phy_read_status(phydev);
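
The PHY_RUNNING change above makes the polled state machine re-read the link bit and only drop into PHY_CHANGELINK when the link actually changed, instead of on every poll. The idea reduces to "remember the old value, re-read, compare", roughly as below (my_poll_link() is illustrative; phy_read_status() and phydev->link are the real interfaces used in the hunk):

#include <linux/phy.h>

/* Returns 1 if the link state changed since the last poll, <0 on error. */
static int my_poll_link(struct phy_device *phydev)
{
	int old_link = phydev->link;
	int err = phy_read_status(phydev);	/* refreshes phydev->link */

	if (err)
		return err;

	return old_link != phydev->link;
}
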
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index c0f6479e19d4..70b08958763a 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -91,19 +91,18 @@ static int lan911x_config_init(struct phy_device *phydev)
91} 91}
92 92
93/* 93/*
94 * The LAN8710/LAN8720 requires a minimum of 2 link pulses within 64ms of each 94 * The LAN87xx suffers from rare absence of the ENERGYON-bit when Ethernet cable
95 * other in order to set the ENERGYON bit and exit EDPD mode. If a link partner 95 * plugs in while LAN87xx is in Energy Detect Power-Down mode. This leads to
96 * does send the pulses within this interval, the PHY will remained powered 96 * unstable detection of plugging in Ethernet cable.
97 * down. 97 * This workaround disables Energy Detect Power-Down mode and waiting for
98 * 98 * response on link pulses to detect presence of plugged Ethernet cable.
99 * This workaround will manually toggle the PHY on/off upon calls to read_status 99 * The Energy Detect Power-Down mode is enabled again in the end of procedure to
100 * in order to generate link test pulses if the link is down. If a link partner 100 * save approximately 220 mW of power if cable is unplugged.
101 * is present, it will respond to the pulses, which will cause the ENERGYON bit
102 * to be set and will cause the EDPD mode to be exited.
103 */ 101 */
104static int lan87xx_read_status(struct phy_device *phydev) 102static int lan87xx_read_status(struct phy_device *phydev)
105{ 103{
106 int err = genphy_read_status(phydev); 104 int err = genphy_read_status(phydev);
105 int i;
107 106
108 if (!phydev->link) { 107 if (!phydev->link) {
109 /* Disable EDPD to wake up PHY */ 108 /* Disable EDPD to wake up PHY */
@@ -116,8 +115,16 @@ static int lan87xx_read_status(struct phy_device *phydev)
116 if (rc < 0) 115 if (rc < 0)
117 return rc; 116 return rc;
118 117
119 /* Sleep 64 ms to allow ~5 link test pulses to be sent */ 118 /* Wait max 640 ms to detect energy */
120 msleep(64); 119 for (i = 0; i < 64; i++) {
120 /* Sleep to allow link test pulses to be sent */
121 msleep(10);
122 rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
123 if (rc < 0)
124 return rc;
125 if (rc & MII_LAN83C185_ENERGYON)
126 break;
127 }
121 128
122 /* Re-enable EDPD */ 129 /* Re-enable EDPD */
123 rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS); 130 rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
@@ -191,7 +198,7 @@ static struct phy_driver smsc_phy_driver[] = {
191 198
192 /* basic functions */ 199 /* basic functions */
193 .config_aneg = genphy_config_aneg, 200 .config_aneg = genphy_config_aneg,
194 .read_status = genphy_read_status, 201 .read_status = lan87xx_read_status,
195 .config_init = smsc_phy_config_init, 202 .config_init = smsc_phy_config_init,
196 .soft_reset = smsc_phy_reset, 203 .soft_reset = smsc_phy_reset,
197 204
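
Instead of a fixed msleep(64), the LAN87xx workaround now polls the control/status register up to 64 times with a 10 ms sleep and breaks out as soon as ENERGYON is seen: a bounded poll with early exit. A generic sketch of that shape, assuming a hypothetical my_read_status() accessor and MY_READY status bit:

#include <linux/delay.h>
#include <linux/errno.h>

#define MY_READY	0x02	/* hypothetical status bit */

/* Poll up to ~640 ms for MY_READY; 0 on success, -ETIMEDOUT otherwise. */
static int my_wait_ready(int (*my_read_status)(void))
{
	int i;

	for (i = 0; i < 64; i++) {
		int st = my_read_status();

		if (st < 0)
			return st;		/* propagate read errors */
		if (st & MY_READY)
			return 0;		/* condition met, stop early */
		msleep(10);
	}

	return -ETIMEDOUT;
}
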
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 9d15566521a7..fa8f5046afe9 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -269,9 +269,9 @@ static void ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound);
269static void ppp_ccp_closed(struct ppp *ppp); 269static void ppp_ccp_closed(struct ppp *ppp);
270static struct compressor *find_compressor(int type); 270static struct compressor *find_compressor(int type);
271static void ppp_get_stats(struct ppp *ppp, struct ppp_stats *st); 271static void ppp_get_stats(struct ppp *ppp, struct ppp_stats *st);
272static struct ppp *ppp_create_interface(struct net *net, int unit, int *retp); 272static struct ppp *ppp_create_interface(struct net *net, int unit,
273 struct file *file, int *retp);
273static void init_ppp_file(struct ppp_file *pf, int kind); 274static void init_ppp_file(struct ppp_file *pf, int kind);
274static void ppp_shutdown_interface(struct ppp *ppp);
275static void ppp_destroy_interface(struct ppp *ppp); 275static void ppp_destroy_interface(struct ppp *ppp);
276static struct ppp *ppp_find_unit(struct ppp_net *pn, int unit); 276static struct ppp *ppp_find_unit(struct ppp_net *pn, int unit);
277static struct channel *ppp_find_channel(struct ppp_net *pn, int unit); 277static struct channel *ppp_find_channel(struct ppp_net *pn, int unit);
@@ -392,8 +392,10 @@ static int ppp_release(struct inode *unused, struct file *file)
392 file->private_data = NULL; 392 file->private_data = NULL;
393 if (pf->kind == INTERFACE) { 393 if (pf->kind == INTERFACE) {
394 ppp = PF_TO_PPP(pf); 394 ppp = PF_TO_PPP(pf);
395 rtnl_lock();
395 if (file == ppp->owner) 396 if (file == ppp->owner)
396 ppp_shutdown_interface(ppp); 397 unregister_netdevice(ppp->dev);
398 rtnl_unlock();
397 } 399 }
398 if (atomic_dec_and_test(&pf->refcnt)) { 400 if (atomic_dec_and_test(&pf->refcnt)) {
399 switch (pf->kind) { 401 switch (pf->kind) {
@@ -593,8 +595,10 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
593 mutex_lock(&ppp_mutex); 595 mutex_lock(&ppp_mutex);
594 if (pf->kind == INTERFACE) { 596 if (pf->kind == INTERFACE) {
595 ppp = PF_TO_PPP(pf); 597 ppp = PF_TO_PPP(pf);
598 rtnl_lock();
596 if (file == ppp->owner) 599 if (file == ppp->owner)
597 ppp_shutdown_interface(ppp); 600 unregister_netdevice(ppp->dev);
601 rtnl_unlock();
598 } 602 }
599 if (atomic_long_read(&file->f_count) < 2) { 603 if (atomic_long_read(&file->f_count) < 2) {
600 ppp_release(NULL, file); 604 ppp_release(NULL, file);
@@ -838,11 +842,10 @@ static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
838 /* Create a new ppp unit */ 842 /* Create a new ppp unit */
839 if (get_user(unit, p)) 843 if (get_user(unit, p))
840 break; 844 break;
841 ppp = ppp_create_interface(net, unit, &err); 845 ppp = ppp_create_interface(net, unit, file, &err);
842 if (!ppp) 846 if (!ppp)
843 break; 847 break;
844 file->private_data = &ppp->file; 848 file->private_data = &ppp->file;
845 ppp->owner = file;
846 err = -EFAULT; 849 err = -EFAULT;
847 if (put_user(ppp->file.index, p)) 850 if (put_user(ppp->file.index, p))
848 break; 851 break;
@@ -916,6 +919,16 @@ static __net_init int ppp_init_net(struct net *net)
916static __net_exit void ppp_exit_net(struct net *net) 919static __net_exit void ppp_exit_net(struct net *net)
917{ 920{
918 struct ppp_net *pn = net_generic(net, ppp_net_id); 921 struct ppp_net *pn = net_generic(net, ppp_net_id);
922 struct ppp *ppp;
923 LIST_HEAD(list);
924 int id;
925
926 rtnl_lock();
927 idr_for_each_entry(&pn->units_idr, ppp, id)
928 unregister_netdevice_queue(ppp->dev, &list);
929
930 unregister_netdevice_many(&list);
931 rtnl_unlock();
919 932
920 idr_destroy(&pn->units_idr); 933 idr_destroy(&pn->units_idr);
921} 934}
@@ -1088,8 +1101,28 @@ static int ppp_dev_init(struct net_device *dev)
1088 return 0; 1101 return 0;
1089} 1102}
1090 1103
1104static void ppp_dev_uninit(struct net_device *dev)
1105{
1106 struct ppp *ppp = netdev_priv(dev);
1107 struct ppp_net *pn = ppp_pernet(ppp->ppp_net);
1108
1109 ppp_lock(ppp);
1110 ppp->closing = 1;
1111 ppp_unlock(ppp);
1112
1113 mutex_lock(&pn->all_ppp_mutex);
1114 unit_put(&pn->units_idr, ppp->file.index);
1115 mutex_unlock(&pn->all_ppp_mutex);
1116
1117 ppp->owner = NULL;
1118
1119 ppp->file.dead = 1;
1120 wake_up_interruptible(&ppp->file.rwait);
1121}
1122
1091static const struct net_device_ops ppp_netdev_ops = { 1123static const struct net_device_ops ppp_netdev_ops = {
1092 .ndo_init = ppp_dev_init, 1124 .ndo_init = ppp_dev_init,
1125 .ndo_uninit = ppp_dev_uninit,
1093 .ndo_start_xmit = ppp_start_xmit, 1126 .ndo_start_xmit = ppp_start_xmit,
1094 .ndo_do_ioctl = ppp_net_ioctl, 1127 .ndo_do_ioctl = ppp_net_ioctl,
1095 .ndo_get_stats64 = ppp_get_stats64, 1128 .ndo_get_stats64 = ppp_get_stats64,
@@ -2667,8 +2700,8 @@ ppp_get_stats(struct ppp *ppp, struct ppp_stats *st)
2667 * or if there is already a unit with the requested number. 2700 * or if there is already a unit with the requested number.
2668 * unit == -1 means allocate a new number. 2701 * unit == -1 means allocate a new number.
2669 */ 2702 */
2670static struct ppp * 2703static struct ppp *ppp_create_interface(struct net *net, int unit,
2671ppp_create_interface(struct net *net, int unit, int *retp) 2704 struct file *file, int *retp)
2672{ 2705{
2673 struct ppp *ppp; 2706 struct ppp *ppp;
2674 struct ppp_net *pn; 2707 struct ppp_net *pn;
@@ -2688,6 +2721,7 @@ ppp_create_interface(struct net *net, int unit, int *retp)
2688 ppp->mru = PPP_MRU; 2721 ppp->mru = PPP_MRU;
2689 init_ppp_file(&ppp->file, INTERFACE); 2722 init_ppp_file(&ppp->file, INTERFACE);
2690 ppp->file.hdrlen = PPP_HDRLEN - 2; /* don't count proto bytes */ 2723 ppp->file.hdrlen = PPP_HDRLEN - 2; /* don't count proto bytes */
2724 ppp->owner = file;
2691 for (i = 0; i < NUM_NP; ++i) 2725 for (i = 0; i < NUM_NP; ++i)
2692 ppp->npmode[i] = NPMODE_PASS; 2726 ppp->npmode[i] = NPMODE_PASS;
2693 INIT_LIST_HEAD(&ppp->channels); 2727 INIT_LIST_HEAD(&ppp->channels);
@@ -2776,34 +2810,6 @@ init_ppp_file(struct ppp_file *pf, int kind)
2776} 2810}
2777 2811
2778/* 2812/*
2779 * Take down a ppp interface unit - called when the owning file
2780 * (the one that created the unit) is closed or detached.
2781 */
2782static void ppp_shutdown_interface(struct ppp *ppp)
2783{
2784 struct ppp_net *pn;
2785
2786 pn = ppp_pernet(ppp->ppp_net);
2787 mutex_lock(&pn->all_ppp_mutex);
2788
2789 /* This will call dev_close() for us. */
2790 ppp_lock(ppp);
2791 if (!ppp->closing) {
2792 ppp->closing = 1;
2793 ppp_unlock(ppp);
2794 unregister_netdev(ppp->dev);
2795 unit_put(&pn->units_idr, ppp->file.index);
2796 } else
2797 ppp_unlock(ppp);
2798
2799 ppp->file.dead = 1;
2800 ppp->owner = NULL;
2801 wake_up_interruptible(&ppp->file.rwait);
2802
2803 mutex_unlock(&pn->all_ppp_mutex);
2804}
2805
2806/*
2807 * Free the memory used by a ppp unit. This is only called once 2813 * Free the memory used by a ppp unit. This is only called once
2808 * there are no channels connected to the unit and no file structs 2814 * there are no channels connected to the unit and no file structs
2809 * that reference the unit. 2815 * that reference the unit.
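
The ppp_generic rework moves per-unit teardown out of a hand-rolled ppp_shutdown_interface() and into the netdev core: the owner simply calls unregister_netdevice() under rtnl_lock(), and the driver's .ndo_uninit callback (ppp_dev_uninit() here) runs as part of unregistration to drop the idr entry and mark the file dead, while the per-net exit path batches the same work with unregister_netdevice_queue()/unregister_netdevice_many(). A minimal sketch of that wiring, with hypothetical my_* names:

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static void my_dev_uninit(struct net_device *dev)
{
	/* Invoked by the core during unregistration, with RTNL held:
	 * drop lookup-table entries, clear owners, wake any waiters.
	 */
}

static const struct net_device_ops my_netdev_ops = {
	.ndo_uninit = my_dev_uninit,
	/* .ndo_start_xmit etc. omitted for brevity */
};

static void my_owner_release(struct net_device *dev)
{
	rtnl_lock();
	unregister_netdevice(dev);	/* triggers .ndo_uninit */
	rtnl_unlock();
}
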
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 4545e78840b0..35a2bffe848a 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -523,6 +523,7 @@ static const struct driver_info wwan_info = {
523#define REALTEK_VENDOR_ID 0x0bda 523#define REALTEK_VENDOR_ID 0x0bda
524#define SAMSUNG_VENDOR_ID 0x04e8 524#define SAMSUNG_VENDOR_ID 0x04e8
525#define LENOVO_VENDOR_ID 0x17ef 525#define LENOVO_VENDOR_ID 0x17ef
526#define NVIDIA_VENDOR_ID 0x0955
526 527
527static const struct usb_device_id products[] = { 528static const struct usb_device_id products[] = {
528/* BLACKLIST !! 529/* BLACKLIST !!
@@ -710,6 +711,13 @@ static const struct usb_device_id products[] = {
710 .driver_info = 0, 711 .driver_info = 0,
711}, 712},
712 713
714/* NVIDIA Tegra USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
715{
716 USB_DEVICE_AND_INTERFACE_INFO(NVIDIA_VENDOR_ID, 0x09ff, USB_CLASS_COMM,
717 USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
718 .driver_info = 0,
719},
720
713/* WHITELIST!!! 721/* WHITELIST!!!
714 * 722 *
715 * CDC Ether uses two interfaces, not necessarily consecutive. 723 * CDC Ether uses two interfaces, not necessarily consecutive.
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index e4b7a47a825c..efc18e05af0a 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -158,7 +158,7 @@ static int cdc_mbim_bind(struct usbnet *dev, struct usb_interface *intf)
158 if (!cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting)) 158 if (!cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting))
159 goto err; 159 goto err;
160 160
161 ret = cdc_ncm_bind_common(dev, intf, data_altsetting); 161 ret = cdc_ncm_bind_common(dev, intf, data_altsetting, 0);
162 if (ret) 162 if (ret)
163 goto err; 163 goto err;
164 164
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 8067b8fbb0ee..db40175b1a0b 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -6,7 +6,7 @@
6 * Original author: Hans Petter Selasky <hans.petter.selasky@stericsson.com> 6 * Original author: Hans Petter Selasky <hans.petter.selasky@stericsson.com>
7 * 7 *
8 * USB Host Driver for Network Control Model (NCM) 8 * USB Host Driver for Network Control Model (NCM)
9 * http://www.usb.org/developers/devclass_docs/NCM10.zip 9 * http://www.usb.org/developers/docs/devclass_docs/NCM10_012011.zip
10 * 10 *
11 * The NCM encoding, decoding and initialization logic 11 * The NCM encoding, decoding and initialization logic
12 * derives from FreeBSD 8.x. if_cdce.c and if_cdcereg.h 12 * derives from FreeBSD 8.x. if_cdce.c and if_cdcereg.h
@@ -684,10 +684,12 @@ static void cdc_ncm_free(struct cdc_ncm_ctx *ctx)
684 ctx->tx_curr_skb = NULL; 684 ctx->tx_curr_skb = NULL;
685 } 685 }
686 686
687 kfree(ctx->delayed_ndp16);
688
687 kfree(ctx); 689 kfree(ctx);
688} 690}
689 691
690int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting) 692int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting, int drvflags)
691{ 693{
692 const struct usb_cdc_union_desc *union_desc = NULL; 694 const struct usb_cdc_union_desc *union_desc = NULL;
693 struct cdc_ncm_ctx *ctx; 695 struct cdc_ncm_ctx *ctx;
@@ -855,6 +857,17 @@ advance:
855 /* finish setting up the device specific data */ 857 /* finish setting up the device specific data */
856 cdc_ncm_setup(dev); 858 cdc_ncm_setup(dev);
857 859
860 /* Device-specific flags */
861 ctx->drvflags = drvflags;
862
863 /* Allocate the delayed NDP if needed. */
864 if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) {
865 ctx->delayed_ndp16 = kzalloc(ctx->max_ndp_size, GFP_KERNEL);
866 if (!ctx->delayed_ndp16)
867 goto error2;
868 dev_info(&intf->dev, "NDP will be placed at end of frame for this device.");
869 }
870
858 /* override ethtool_ops */ 871 /* override ethtool_ops */
859 dev->net->ethtool_ops = &cdc_ncm_ethtool_ops; 872 dev->net->ethtool_ops = &cdc_ncm_ethtool_ops;
860 873
@@ -954,8 +967,11 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
954 if (cdc_ncm_select_altsetting(intf) != CDC_NCM_COMM_ALTSETTING_NCM) 967 if (cdc_ncm_select_altsetting(intf) != CDC_NCM_COMM_ALTSETTING_NCM)
955 return -ENODEV; 968 return -ENODEV;
956 969
957 /* The NCM data altsetting is fixed */ 970 /* The NCM data altsetting is fixed, so we hard-coded it.
958 ret = cdc_ncm_bind_common(dev, intf, CDC_NCM_DATA_ALTSETTING_NCM); 971 * Additionally, generic NCM devices are assumed to accept arbitrarily
972 * placed NDP.
973 */
974 ret = cdc_ncm_bind_common(dev, intf, CDC_NCM_DATA_ALTSETTING_NCM, 0);
959 975
960 /* 976 /*
961 * We should get an event when network connection is "connected" or 977 * We should get an event when network connection is "connected" or
@@ -986,6 +1002,14 @@ static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp(struct cdc_ncm_ctx *ctx, struct sk_
986 struct usb_cdc_ncm_nth16 *nth16 = (void *)skb->data; 1002 struct usb_cdc_ncm_nth16 *nth16 = (void *)skb->data;
987 size_t ndpoffset = le16_to_cpu(nth16->wNdpIndex); 1003 size_t ndpoffset = le16_to_cpu(nth16->wNdpIndex);
988 1004
1005 /* If NDP should be moved to the end of the NCM package, we can't follow the
1006 * NTH16 header as we would normally do. NDP isn't written to the SKB yet, and
1007 * the wNdpIndex field in the header is actually not consistent with reality. It will be later.
1008 */
1009 if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END)
1010 if (ctx->delayed_ndp16->dwSignature == sign)
1011 return ctx->delayed_ndp16;
1012
989 /* follow the chain of NDPs, looking for a match */ 1013 /* follow the chain of NDPs, looking for a match */
990 while (ndpoffset) { 1014 while (ndpoffset) {
991 ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb->data + ndpoffset); 1015 ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb->data + ndpoffset);
@@ -995,7 +1019,8 @@ static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp(struct cdc_ncm_ctx *ctx, struct sk_
995 } 1019 }
996 1020
997 /* align new NDP */ 1021 /* align new NDP */
998 cdc_ncm_align_tail(skb, ctx->tx_ndp_modulus, 0, ctx->tx_max); 1022 if (!(ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END))
1023 cdc_ncm_align_tail(skb, ctx->tx_ndp_modulus, 0, ctx->tx_max);
999 1024
1000 /* verify that there is room for the NDP and the datagram (reserve) */ 1025 /* verify that there is room for the NDP and the datagram (reserve) */
1001 if ((ctx->tx_max - skb->len - reserve) < ctx->max_ndp_size) 1026 if ((ctx->tx_max - skb->len - reserve) < ctx->max_ndp_size)
@@ -1008,7 +1033,11 @@ static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp(struct cdc_ncm_ctx *ctx, struct sk_
1008 nth16->wNdpIndex = cpu_to_le16(skb->len); 1033 nth16->wNdpIndex = cpu_to_le16(skb->len);
1009 1034
1010 /* push a new empty NDP */ 1035 /* push a new empty NDP */
1011 ndp16 = (struct usb_cdc_ncm_ndp16 *)memset(skb_put(skb, ctx->max_ndp_size), 0, ctx->max_ndp_size); 1036 if (!(ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END))
1037 ndp16 = (struct usb_cdc_ncm_ndp16 *)memset(skb_put(skb, ctx->max_ndp_size), 0, ctx->max_ndp_size);
1038 else
1039 ndp16 = ctx->delayed_ndp16;
1040
1012 ndp16->dwSignature = sign; 1041 ndp16->dwSignature = sign;
1013 ndp16->wLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_ndp16) + sizeof(struct usb_cdc_ncm_dpe16)); 1042 ndp16->wLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_ndp16) + sizeof(struct usb_cdc_ncm_dpe16));
1014 return ndp16; 1043 return ndp16;
@@ -1023,6 +1052,15 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
1023 struct sk_buff *skb_out; 1052 struct sk_buff *skb_out;
1024 u16 n = 0, index, ndplen; 1053 u16 n = 0, index, ndplen;
1025 u8 ready2send = 0; 1054 u8 ready2send = 0;
1055 u32 delayed_ndp_size;
1056
1057 /* When our NDP gets written in cdc_ncm_ndp(), then skb_out->len gets updated
1058 * accordingly. Otherwise, we should check here.
1059 */
1060 if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END)
1061 delayed_ndp_size = ctx->max_ndp_size;
1062 else
1063 delayed_ndp_size = 0;
1026 1064
1027 /* if there is a remaining skb, it gets priority */ 1065 /* if there is a remaining skb, it gets priority */
1028 if (skb != NULL) { 1066 if (skb != NULL) {
@@ -1077,7 +1115,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
1077 cdc_ncm_align_tail(skb_out, ctx->tx_modulus, ctx->tx_remainder, ctx->tx_max); 1115 cdc_ncm_align_tail(skb_out, ctx->tx_modulus, ctx->tx_remainder, ctx->tx_max);
1078 1116
1079 /* check if we had enough room left for both NDP and frame */ 1117 /* check if we had enough room left for both NDP and frame */
1080 if (!ndp16 || skb_out->len + skb->len > ctx->tx_max) { 1118 if (!ndp16 || skb_out->len + skb->len + delayed_ndp_size > ctx->tx_max) {
1081 if (n == 0) { 1119 if (n == 0) {
1082 /* won't fit, MTU problem? */ 1120 /* won't fit, MTU problem? */
1083 dev_kfree_skb_any(skb); 1121 dev_kfree_skb_any(skb);
@@ -1150,6 +1188,17 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
1150 /* variables will be reset at next call */ 1188 /* variables will be reset at next call */
1151 } 1189 }
1152 1190
1191 /* If requested, put NDP at end of frame. */
1192 if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) {
1193 nth16 = (struct usb_cdc_ncm_nth16 *)skb_out->data;
1194 cdc_ncm_align_tail(skb_out, ctx->tx_ndp_modulus, 0, ctx->tx_max);
1195 nth16->wNdpIndex = cpu_to_le16(skb_out->len);
1196 memcpy(skb_put(skb_out, ctx->max_ndp_size), ctx->delayed_ndp16, ctx->max_ndp_size);
1197
1198 /* Zero out delayed NDP - signature checking will naturally fail. */
1199 ndp16 = memset(ctx->delayed_ndp16, 0, ctx->max_ndp_size);
1200 }
1201
1153 /* If collected data size is less or equal ctx->min_tx_pkt 1202 /* If collected data size is less or equal ctx->min_tx_pkt
1154 * bytes, we send buffers as it is. If we get more data, it 1203 * bytes, we send buffers as it is. If we get more data, it
1155 * would be more efficient for USB HS mobile device with DMA 1204 * would be more efficient for USB HS mobile device with DMA
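
The CDC NCM change introduces a per-device capability flag: callers of the bind helper pass drvflags, and when CDC_NCM_FLAG_NDP_TO_END is set the driver pre-allocates a scratch NDP16 (delayed_ndp16), fills it out of line while datagrams are added, and copies it to the tail of the NTB only just before transmission, reserving its size when checking whether another datagram still fits. The essence is "allocate the optional buffer once at bind time, gate every use on the flag", roughly as in this hedged sketch (struct my_ctx and MY_FLAG_NDP_TO_END are made-up stand-ins):

#include <linux/slab.h>
#include <linux/types.h>

#define MY_FLAG_NDP_TO_END	0x02	/* hypothetical driver flag */

struct my_ctx {
	int	drvflags;
	void	*delayed_ndp;		/* only used when the flag is set */
	size_t	max_ndp_size;
};

static int my_bind(struct my_ctx *ctx, int drvflags)
{
	ctx->drvflags = drvflags;

	if (ctx->drvflags & MY_FLAG_NDP_TO_END) {
		ctx->delayed_ndp = kzalloc(ctx->max_ndp_size, GFP_KERNEL);
		if (!ctx->delayed_ndp)
			return -ENOMEM;
	}
	return 0;
}

static void my_unbind(struct my_ctx *ctx)
{
	kfree(ctx->delayed_ndp);	/* kfree(NULL) is a no-op */
	ctx->delayed_ndp = NULL;
}
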
diff --git a/drivers/net/usb/huawei_cdc_ncm.c b/drivers/net/usb/huawei_cdc_ncm.c
index 735f7dadb9a0..2680a65cd5e4 100644
--- a/drivers/net/usb/huawei_cdc_ncm.c
+++ b/drivers/net/usb/huawei_cdc_ncm.c
@@ -73,11 +73,14 @@ static int huawei_cdc_ncm_bind(struct usbnet *usbnet_dev,
73 struct usb_driver *subdriver = ERR_PTR(-ENODEV); 73 struct usb_driver *subdriver = ERR_PTR(-ENODEV);
74 int ret = -ENODEV; 74 int ret = -ENODEV;
75 struct huawei_cdc_ncm_state *drvstate = (void *)&usbnet_dev->data; 75 struct huawei_cdc_ncm_state *drvstate = (void *)&usbnet_dev->data;
76 int drvflags = 0;
76 77
77 /* altsetting should always be 1 for NCM devices - so we hard-coded 78 /* altsetting should always be 1 for NCM devices - so we hard-coded
78 * it here 79 * it here. Some huawei devices will need the NDP part of the NCM package to
80 * be at the end of the frame.
79 */ 81 */
80 ret = cdc_ncm_bind_common(usbnet_dev, intf, 1); 82 drvflags |= CDC_NCM_FLAG_NDP_TO_END;
83 ret = cdc_ncm_bind_common(usbnet_dev, intf, 1, drvflags);
81 if (ret) 84 if (ret)
82 goto err; 85 goto err;
83 86
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index f603f362504b..64a60afbe50c 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -757,6 +757,7 @@ static const struct usb_device_id products[] = {
757 {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */ 757 {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */
758 {QMI_FIXED_INTF(0x1199, 0x901f, 8)}, /* Sierra Wireless EM7355 */ 758 {QMI_FIXED_INTF(0x1199, 0x901f, 8)}, /* Sierra Wireless EM7355 */
759 {QMI_FIXED_INTF(0x1199, 0x9041, 8)}, /* Sierra Wireless MC7305/MC7355 */ 759 {QMI_FIXED_INTF(0x1199, 0x9041, 8)}, /* Sierra Wireless MC7305/MC7355 */
760 {QMI_FIXED_INTF(0x1199, 0x9041, 10)}, /* Sierra Wireless MC7305/MC7355 */
760 {QMI_FIXED_INTF(0x1199, 0x9051, 8)}, /* Netgear AirCard 340U */ 761 {QMI_FIXED_INTF(0x1199, 0x9051, 8)}, /* Netgear AirCard 340U */
761 {QMI_FIXED_INTF(0x1199, 0x9053, 8)}, /* Sierra Wireless Modem */ 762 {QMI_FIXED_INTF(0x1199, 0x9053, 8)}, /* Sierra Wireless Modem */
762 {QMI_FIXED_INTF(0x1199, 0x9054, 8)}, /* Sierra Wireless Modem */ 763 {QMI_FIXED_INTF(0x1199, 0x9054, 8)}, /* Sierra Wireless Modem */
@@ -784,6 +785,7 @@ static const struct usb_device_id products[] = {
784 {QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */ 785 {QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
785 {QMI_FIXED_INTF(0x413c, 0x81a8, 8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */ 786 {QMI_FIXED_INTF(0x413c, 0x81a8, 8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
786 {QMI_FIXED_INTF(0x413c, 0x81a9, 8)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */ 787 {QMI_FIXED_INTF(0x413c, 0x81a9, 8)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
788 {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
787 {QMI_FIXED_INTF(0x03f0, 0x581d, 4)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */ 789 {QMI_FIXED_INTF(0x03f0, 0x581d, 4)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */
788 790
789 /* 4. Gobi 1000 devices */ 791 /* 4. Gobi 1000 devices */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index aafa1a1898e4..ad8cbc6c9ee7 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -27,7 +27,7 @@
27#include <linux/usb/cdc.h> 27#include <linux/usb/cdc.h>
28 28
29/* Version Information */ 29/* Version Information */
30#define DRIVER_VERSION "v1.08.0 (2015/01/13)" 30#define DRIVER_VERSION "v1.08.1 (2015/07/28)"
31#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>" 31#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
32#define DRIVER_DESC "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters" 32#define DRIVER_DESC "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters"
33#define MODULENAME "r8152" 33#define MODULENAME "r8152"
@@ -494,6 +494,7 @@ enum rtl8152_flags {
494#define VENDOR_ID_REALTEK 0x0bda 494#define VENDOR_ID_REALTEK 0x0bda
495#define VENDOR_ID_SAMSUNG 0x04e8 495#define VENDOR_ID_SAMSUNG 0x04e8
496#define VENDOR_ID_LENOVO 0x17ef 496#define VENDOR_ID_LENOVO 0x17ef
497#define VENDOR_ID_NVIDIA 0x0955
497 498
498#define MCU_TYPE_PLA 0x0100 499#define MCU_TYPE_PLA 0x0100
499#define MCU_TYPE_USB 0x0000 500#define MCU_TYPE_USB 0x0000
@@ -1901,11 +1902,10 @@ static void rtl_drop_queued_tx(struct r8152 *tp)
1901static void rtl8152_tx_timeout(struct net_device *netdev) 1902static void rtl8152_tx_timeout(struct net_device *netdev)
1902{ 1903{
1903 struct r8152 *tp = netdev_priv(netdev); 1904 struct r8152 *tp = netdev_priv(netdev);
1904 int i;
1905 1905
1906 netif_warn(tp, tx_err, netdev, "Tx timeout\n"); 1906 netif_warn(tp, tx_err, netdev, "Tx timeout\n");
1907 for (i = 0; i < RTL8152_MAX_TX; i++) 1907
1908 usb_unlink_urb(tp->tx_info[i].urb); 1908 usb_queue_reset_device(tp->intf);
1909} 1909}
1910 1910
1911static void rtl8152_set_rx_mode(struct net_device *netdev) 1911static void rtl8152_set_rx_mode(struct net_device *netdev)
@@ -2074,7 +2074,6 @@ static int rtl_start_rx(struct r8152 *tp)
2074{ 2074{
2075 int i, ret = 0; 2075 int i, ret = 0;
2076 2076
2077 napi_disable(&tp->napi);
2078 INIT_LIST_HEAD(&tp->rx_done); 2077 INIT_LIST_HEAD(&tp->rx_done);
2079 for (i = 0; i < RTL8152_MAX_RX; i++) { 2078 for (i = 0; i < RTL8152_MAX_RX; i++) {
2080 INIT_LIST_HEAD(&tp->rx_info[i].list); 2079 INIT_LIST_HEAD(&tp->rx_info[i].list);
@@ -2082,7 +2081,6 @@ static int rtl_start_rx(struct r8152 *tp)
2082 if (ret) 2081 if (ret)
2083 break; 2082 break;
2084 } 2083 }
2085 napi_enable(&tp->napi);
2086 2084
2087 if (ret && ++i < RTL8152_MAX_RX) { 2085 if (ret && ++i < RTL8152_MAX_RX) {
2088 struct list_head rx_queue; 2086 struct list_head rx_queue;
@@ -2165,6 +2163,7 @@ static int rtl8153_enable(struct r8152 *tp)
2165 if (test_bit(RTL8152_UNPLUG, &tp->flags)) 2163 if (test_bit(RTL8152_UNPLUG, &tp->flags))
2166 return -ENODEV; 2164 return -ENODEV;
2167 2165
2166 usb_disable_lpm(tp->udev);
2168 set_tx_qlen(tp); 2167 set_tx_qlen(tp);
2169 rtl_set_eee_plus(tp); 2168 rtl_set_eee_plus(tp);
2170 r8153_set_rx_early_timeout(tp); 2169 r8153_set_rx_early_timeout(tp);
@@ -2336,11 +2335,61 @@ static void __rtl_set_wol(struct r8152 *tp, u32 wolopts)
2336 device_set_wakeup_enable(&tp->udev->dev, false); 2335 device_set_wakeup_enable(&tp->udev->dev, false);
2337} 2336}
2338 2337
2338static void r8153_u1u2en(struct r8152 *tp, bool enable)
2339{
2340 u8 u1u2[8];
2341
2342 if (enable)
2343 memset(u1u2, 0xff, sizeof(u1u2));
2344 else
2345 memset(u1u2, 0x00, sizeof(u1u2));
2346
2347 usb_ocp_write(tp, USB_TOLERANCE, BYTE_EN_SIX_BYTES, sizeof(u1u2), u1u2);
2348}
2349
2350static void r8153_u2p3en(struct r8152 *tp, bool enable)
2351{
2352 u32 ocp_data;
2353
2354 ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL);
2355 if (enable && tp->version != RTL_VER_03 && tp->version != RTL_VER_04)
2356 ocp_data |= U2P3_ENABLE;
2357 else
2358 ocp_data &= ~U2P3_ENABLE;
2359 ocp_write_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL, ocp_data);
2360}
2361
2362static void r8153_power_cut_en(struct r8152 *tp, bool enable)
2363{
2364 u32 ocp_data;
2365
2366 ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_POWER_CUT);
2367 if (enable)
2368 ocp_data |= PWR_EN | PHASE2_EN;
2369 else
2370 ocp_data &= ~(PWR_EN | PHASE2_EN);
2371 ocp_write_word(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data);
2372
2373 ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0);
2374 ocp_data &= ~PCUT_STATUS;
2375 ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data);
2376}
2377
2378static bool rtl_can_wakeup(struct r8152 *tp)
2379{
2380 struct usb_device *udev = tp->udev;
2381
2382 return (udev->actconfig->desc.bmAttributes & USB_CONFIG_ATT_WAKEUP);
2383}
2384
2339static void rtl_runtime_suspend_enable(struct r8152 *tp, bool enable) 2385static void rtl_runtime_suspend_enable(struct r8152 *tp, bool enable)
2340{ 2386{
2341 if (enable) { 2387 if (enable) {
2342 u32 ocp_data; 2388 u32 ocp_data;
2343 2389
2390 r8153_u1u2en(tp, false);
2391 r8153_u2p3en(tp, false);
2392
2344 __rtl_set_wol(tp, WAKE_ANY); 2393 __rtl_set_wol(tp, WAKE_ANY);
2345 2394
2346 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG); 2395 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG);
@@ -2352,6 +2401,8 @@ static void rtl_runtime_suspend_enable(struct r8152 *tp, bool enable)
2352 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML); 2401 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
2353 } else { 2402 } else {
2354 __rtl_set_wol(tp, tp->saved_wolopts); 2403 __rtl_set_wol(tp, tp->saved_wolopts);
2404 r8153_u2p3en(tp, true);
2405 r8153_u1u2en(tp, true);
2355 } 2406 }
2356} 2407}
2357 2408
@@ -2598,46 +2649,6 @@ static void r8153_hw_phy_cfg(struct r8152 *tp)
2598 set_bit(PHY_RESET, &tp->flags); 2649 set_bit(PHY_RESET, &tp->flags);
2599} 2650}
2600 2651
2601static void r8153_u1u2en(struct r8152 *tp, bool enable)
2602{
2603 u8 u1u2[8];
2604
2605 if (enable)
2606 memset(u1u2, 0xff, sizeof(u1u2));
2607 else
2608 memset(u1u2, 0x00, sizeof(u1u2));
2609
2610 usb_ocp_write(tp, USB_TOLERANCE, BYTE_EN_SIX_BYTES, sizeof(u1u2), u1u2);
2611}
2612
2613static void r8153_u2p3en(struct r8152 *tp, bool enable)
2614{
2615 u32 ocp_data;
2616
2617 ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL);
2618 if (enable)
2619 ocp_data |= U2P3_ENABLE;
2620 else
2621 ocp_data &= ~U2P3_ENABLE;
2622 ocp_write_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL, ocp_data);
2623}
2624
2625static void r8153_power_cut_en(struct r8152 *tp, bool enable)
2626{
2627 u32 ocp_data;
2628
2629 ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_POWER_CUT);
2630 if (enable)
2631 ocp_data |= PWR_EN | PHASE2_EN;
2632 else
2633 ocp_data &= ~(PWR_EN | PHASE2_EN);
2634 ocp_write_word(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data);
2635
2636 ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0);
2637 ocp_data &= ~PCUT_STATUS;
2638 ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data);
2639}
2640
2641static void r8153_first_init(struct r8152 *tp) 2652static void r8153_first_init(struct r8152 *tp)
2642{ 2653{
2643 u32 ocp_data; 2654 u32 ocp_data;
@@ -2780,6 +2791,7 @@ static void rtl8153_disable(struct r8152 *tp)
2780 r8153_disable_aldps(tp); 2791 r8153_disable_aldps(tp);
2781 rtl_disable(tp); 2792 rtl_disable(tp);
2782 r8153_enable_aldps(tp); 2793 r8153_enable_aldps(tp);
2794 usb_enable_lpm(tp->udev);
2783} 2795}
2784 2796
2785static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex) 2797static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex)
@@ -2900,9 +2912,13 @@ static void rtl8153_up(struct r8152 *tp)
2900 if (test_bit(RTL8152_UNPLUG, &tp->flags)) 2912 if (test_bit(RTL8152_UNPLUG, &tp->flags))
2901 return; 2913 return;
2902 2914
2915 r8153_u1u2en(tp, false);
2903 r8153_disable_aldps(tp); 2916 r8153_disable_aldps(tp);
2904 r8153_first_init(tp); 2917 r8153_first_init(tp);
2905 r8153_enable_aldps(tp); 2918 r8153_enable_aldps(tp);
2919 r8153_u2p3en(tp, true);
2920 r8153_u1u2en(tp, true);
2921 usb_enable_lpm(tp->udev);
2906} 2922}
2907 2923
2908static void rtl8153_down(struct r8152 *tp) 2924static void rtl8153_down(struct r8152 *tp)
@@ -2913,6 +2929,7 @@ static void rtl8153_down(struct r8152 *tp)
2913 } 2929 }
2914 2930
2915 r8153_u1u2en(tp, false); 2931 r8153_u1u2en(tp, false);
2932 r8153_u2p3en(tp, false);
2916 r8153_power_cut_en(tp, false); 2933 r8153_power_cut_en(tp, false);
2917 r8153_disable_aldps(tp); 2934 r8153_disable_aldps(tp);
2918 r8153_enter_oob(tp); 2935 r8153_enter_oob(tp);
@@ -2931,8 +2948,10 @@ static void set_carrier(struct r8152 *tp)
2931 if (!netif_carrier_ok(netdev)) { 2948 if (!netif_carrier_ok(netdev)) {
2932 tp->rtl_ops.enable(tp); 2949 tp->rtl_ops.enable(tp);
2933 set_bit(RTL8152_SET_RX_MODE, &tp->flags); 2950 set_bit(RTL8152_SET_RX_MODE, &tp->flags);
2951 napi_disable(&tp->napi);
2934 netif_carrier_on(netdev); 2952 netif_carrier_on(netdev);
2935 rtl_start_rx(tp); 2953 rtl_start_rx(tp);
2954 napi_enable(&tp->napi);
2936 } 2955 }
2937 } else { 2956 } else {
2938 if (netif_carrier_ok(netdev)) { 2957 if (netif_carrier_ok(netdev)) {
@@ -3251,6 +3270,7 @@ static void r8153_init(struct r8152 *tp)
3251 msleep(20); 3270 msleep(20);
3252 } 3271 }
3253 3272
3273 usb_disable_lpm(tp->udev);
3254 r8153_u2p3en(tp, false); 3274 r8153_u2p3en(tp, false);
3255 3275
3256 if (tp->version == RTL_VER_04) { 3276 if (tp->version == RTL_VER_04) {
@@ -3318,6 +3338,59 @@ static void r8153_init(struct r8152 *tp)
3318 r8153_enable_aldps(tp); 3338 r8153_enable_aldps(tp);
3319 r8152b_enable_fc(tp); 3339 r8152b_enable_fc(tp);
3320 rtl_tally_reset(tp); 3340 rtl_tally_reset(tp);
3341 r8153_u2p3en(tp, true);
3342}
3343
3344static int rtl8152_pre_reset(struct usb_interface *intf)
3345{
3346 struct r8152 *tp = usb_get_intfdata(intf);
3347 struct net_device *netdev;
3348
3349 if (!tp)
3350 return 0;
3351
3352 netdev = tp->netdev;
3353 if (!netif_running(netdev))
3354 return 0;
3355
3356 napi_disable(&tp->napi);
3357 clear_bit(WORK_ENABLE, &tp->flags);
3358 usb_kill_urb(tp->intr_urb);
3359 cancel_delayed_work_sync(&tp->schedule);
3360 if (netif_carrier_ok(netdev)) {
3361 netif_stop_queue(netdev);
3362 mutex_lock(&tp->control);
3363 tp->rtl_ops.disable(tp);
3364 mutex_unlock(&tp->control);
3365 }
3366
3367 return 0;
3368}
3369
3370static int rtl8152_post_reset(struct usb_interface *intf)
3371{
3372 struct r8152 *tp = usb_get_intfdata(intf);
3373 struct net_device *netdev;
3374
3375 if (!tp)
3376 return 0;
3377
3378 netdev = tp->netdev;
3379 if (!netif_running(netdev))
3380 return 0;
3381
3382 set_bit(WORK_ENABLE, &tp->flags);
3383 if (netif_carrier_ok(netdev)) {
3384 mutex_lock(&tp->control);
3385 tp->rtl_ops.enable(tp);
3386 rtl8152_set_rx_mode(netdev);
3387 mutex_unlock(&tp->control);
3388 netif_wake_queue(netdev);
3389 }
3390
3391 napi_enable(&tp->napi);
3392
3393 return 0;
3321} 3394}
3322 3395
3323static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message) 3396static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
@@ -3373,9 +3446,11 @@ static int rtl8152_resume(struct usb_interface *intf)
3373 if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { 3446 if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
3374 rtl_runtime_suspend_enable(tp, false); 3447 rtl_runtime_suspend_enable(tp, false);
3375 clear_bit(SELECTIVE_SUSPEND, &tp->flags); 3448 clear_bit(SELECTIVE_SUSPEND, &tp->flags);
3449 napi_disable(&tp->napi);
3376 set_bit(WORK_ENABLE, &tp->flags); 3450 set_bit(WORK_ENABLE, &tp->flags);
3377 if (netif_carrier_ok(tp->netdev)) 3451 if (netif_carrier_ok(tp->netdev))
3378 rtl_start_rx(tp); 3452 rtl_start_rx(tp);
3453 napi_enable(&tp->napi);
3379 } else { 3454 } else {
3380 tp->rtl_ops.up(tp); 3455 tp->rtl_ops.up(tp);
3381 rtl8152_set_speed(tp, AUTONEG_ENABLE, 3456 rtl8152_set_speed(tp, AUTONEG_ENABLE,
@@ -3402,12 +3477,15 @@ static void rtl8152_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3402 if (usb_autopm_get_interface(tp->intf) < 0) 3477 if (usb_autopm_get_interface(tp->intf) < 0)
3403 return; 3478 return;
3404 3479
3405 mutex_lock(&tp->control); 3480 if (!rtl_can_wakeup(tp)) {
3406 3481 wol->supported = 0;
3407 wol->supported = WAKE_ANY; 3482 wol->wolopts = 0;
3408 wol->wolopts = __rtl_get_wol(tp); 3483 } else {
3409 3484 mutex_lock(&tp->control);
3410 mutex_unlock(&tp->control); 3485 wol->supported = WAKE_ANY;
3486 wol->wolopts = __rtl_get_wol(tp);
3487 mutex_unlock(&tp->control);
3488 }
3411 3489
3412 usb_autopm_put_interface(tp->intf); 3490 usb_autopm_put_interface(tp->intf);
3413} 3491}
@@ -3417,6 +3495,9 @@ static int rtl8152_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3417 struct r8152 *tp = netdev_priv(dev); 3495 struct r8152 *tp = netdev_priv(dev);
3418 int ret; 3496 int ret;
3419 3497
3498 if (!rtl_can_wakeup(tp))
3499 return -EOPNOTSUPP;
3500
3420 ret = usb_autopm_get_interface(tp->intf); 3501 ret = usb_autopm_get_interface(tp->intf);
3421 if (ret < 0) 3502 if (ret < 0)
3422 goto out_set_wol; 3503 goto out_set_wol;
@@ -4058,6 +4139,9 @@ static int rtl8152_probe(struct usb_interface *intf,
4058 goto out1; 4139 goto out1;
4059 } 4140 }
4060 4141
4142 if (!rtl_can_wakeup(tp))
4143 __rtl_set_wol(tp, 0);
4144
4061 tp->saved_wolopts = __rtl_get_wol(tp); 4145 tp->saved_wolopts = __rtl_get_wol(tp);
4062 if (tp->saved_wolopts) 4146 if (tp->saved_wolopts)
4063 device_set_wakeup_enable(&udev->dev, true); 4147 device_set_wakeup_enable(&udev->dev, true);
@@ -4117,6 +4201,7 @@ static struct usb_device_id rtl8152_table[] = {
4117 {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)}, 4201 {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)},
4118 {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)}, 4202 {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)},
4119 {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f)}, 4203 {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f)},
4204 {REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff)},
4120 {} 4205 {}
4121}; 4206};
4122 4207
@@ -4130,6 +4215,8 @@ static struct usb_driver rtl8152_driver = {
4130 .suspend = rtl8152_suspend, 4215 .suspend = rtl8152_suspend,
4131 .resume = rtl8152_resume, 4216 .resume = rtl8152_resume,
4132 .reset_resume = rtl8152_resume, 4217 .reset_resume = rtl8152_resume,
4218 .pre_reset = rtl8152_pre_reset,
4219 .post_reset = rtl8152_post_reset,
4133 .supports_autosuspend = 1, 4220 .supports_autosuspend = 1,
4134 .disable_hub_initiated_lpm = 1, 4221 .disable_hub_initiated_lpm = 1,
4135}; 4222};
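The r8152 hunks above wire up .pre_reset/.post_reset so the driver quiesces the NIC before a USB bus reset and restores it afterwards. The following standalone C model (hypothetical names, not driver code) sketches only that ordering contract: stop NAPI and the queue before the reset, re-enable the MAC and NAPI after it.

/* Standalone model of the pre_reset/post_reset ordering added above.
 * Hypothetical types and names; not the r8152 driver itself. */
#include <stdbool.h>
#include <stdio.h>

struct fake_dev {
	bool running;   /* netif_running() analogue */
	bool carrier;   /* netif_carrier_ok() analogue */
	bool rx_active; /* true while RX/NAPI may touch the hardware */
};

static int model_pre_reset(struct fake_dev *d)
{
	if (!d->running)
		return 0;
	d->rx_active = false;          /* napi_disable + stop queue + disable MAC */
	printf("pre_reset: traffic stopped\n");
	return 0;
}

static int model_post_reset(struct fake_dev *d)
{
	if (!d->running)
		return 0;
	if (d->carrier)
		printf("post_reset: re-enable MAC, restore RX mode\n");
	d->rx_active = true;           /* napi_enable */
	return 0;
}

int main(void)
{
	struct fake_dev d = { .running = true, .carrier = true, .rx_active = true };

	model_pre_reset(&d);           /* no traffic may run between these two calls */
	printf("...bus reset happens here...\n");
	model_post_reset(&d);
	return d.rx_active ? 0 : 1;
}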
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 63c7810e1545..237f8e5e493d 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1756,9 +1756,9 @@ static int virtnet_probe(struct virtio_device *vdev)
1756 /* Do we support "hardware" checksums? */ 1756 /* Do we support "hardware" checksums? */
1757 if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) { 1757 if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
1758 /* This opens up the world of extra features. */ 1758 /* This opens up the world of extra features. */
1759 dev->hw_features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; 1759 dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG;
1760 if (csum) 1760 if (csum)
1761 dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; 1761 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
1762 1762
1763 if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) { 1763 if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
1764 dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO 1764 dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
@@ -1828,7 +1828,8 @@ static int virtnet_probe(struct virtio_device *vdev)
1828 else 1828 else
1829 vi->hdr_len = sizeof(struct virtio_net_hdr); 1829 vi->hdr_len = sizeof(struct virtio_net_hdr);
1830 1830
1831 if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT)) 1831 if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) ||
1832 virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
1832 vi->any_header_sg = true; 1833 vi->any_header_sg = true;
1833 1834
1834 if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) 1835 if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
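The virtio_net change above treats VIRTIO_F_VERSION_1 as implying the any-layout header behaviour. A minimal sketch of that feature-bit test; the bit positions below are placeholders rather than the real VIRTIO_F_* numbers.

/* Feature-bit model; the bit positions are placeholders, not the real
 * VIRTIO_F_* values. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FAKE_F_ANY_LAYOUT (1ULL << 27)
#define FAKE_F_VERSION_1  (1ULL << 32)

static bool has_feature(uint64_t features, uint64_t bit)
{
	return (features & bit) != 0;
}

int main(void)
{
	uint64_t features = FAKE_F_VERSION_1;   /* modern device, no ANY_LAYOUT bit */
	bool any_header_sg = has_feature(features, FAKE_F_ANY_LAYOUT) ||
			     has_feature(features, FAKE_F_VERSION_1);

	printf("any_header_sg = %d\n", any_header_sg);  /* prints 1 */
	return 0;
}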
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index da11bb5e9c7f..46f4caddccbe 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1216,7 +1216,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1216 static const u32 rxprod_reg[2] = { 1216 static const u32 rxprod_reg[2] = {
1217 VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2 1217 VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2
1218 }; 1218 };
1219 u32 num_rxd = 0; 1219 u32 num_pkts = 0;
1220 bool skip_page_frags = false; 1220 bool skip_page_frags = false;
1221 struct Vmxnet3_RxCompDesc *rcd; 1221 struct Vmxnet3_RxCompDesc *rcd;
1222 struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx; 1222 struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
@@ -1235,13 +1235,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1235 struct Vmxnet3_RxDesc *rxd; 1235 struct Vmxnet3_RxDesc *rxd;
1236 u32 idx, ring_idx; 1236 u32 idx, ring_idx;
1237 struct vmxnet3_cmd_ring *ring = NULL; 1237 struct vmxnet3_cmd_ring *ring = NULL;
1238 if (num_rxd >= quota) { 1238 if (num_pkts >= quota) {
1239 /* we may stop even before we see the EOP desc of 1239 /* we may stop even before we see the EOP desc of
1240 * the current pkt 1240 * the current pkt
1241 */ 1241 */
1242 break; 1242 break;
1243 } 1243 }
1244 num_rxd++;
1245 BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2); 1244 BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2);
1246 idx = rcd->rxdIdx; 1245 idx = rcd->rxdIdx;
1247 ring_idx = rcd->rqID < adapter->num_rx_queues ? 0 : 1; 1246 ring_idx = rcd->rqID < adapter->num_rx_queues ? 0 : 1;
@@ -1413,6 +1412,7 @@ not_lro:
1413 napi_gro_receive(&rq->napi, skb); 1412 napi_gro_receive(&rq->napi, skb);
1414 1413
1415 ctx->skb = NULL; 1414 ctx->skb = NULL;
1415 num_pkts++;
1416 } 1416 }
1417 1417
1418rcd_done: 1418rcd_done:
@@ -1443,7 +1443,7 @@ rcd_done:
1443 &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp); 1443 &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
1444 } 1444 }
1445 1445
1446 return num_rxd; 1446 return num_pkts;
1447} 1447}
1448 1448
1449 1449
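The vmxnet3 hunks above charge the NAPI quota per completed packet (num_pkts is incremented when the EOP descriptor hands an skb to the stack) instead of per completion descriptor. A small model, assuming three descriptors per packet, shows how the two accountings differ.

/* Model of quota accounting: completion descriptors grouped into packets.
 * DESC_PER_PKT is an arbitrary example value. */
#include <stdio.h>

#define DESC_PER_PKT 3

int main(void)
{
	int quota = 4, descs = 30;
	int num_pkts = 0, i;

	for (i = 0; i < descs; i++) {
		if (num_pkts >= quota)           /* stop once enough packets were delivered */
			break;
		if ((i + 1) % DESC_PER_PKT == 0)
			num_pkts++;              /* EOP descriptor: packet handed to the stack */
	}

	/* Per-descriptor accounting would have stopped after 4 descriptors,
	 * i.e. mid-packet; per-packet accounting consumes 12 descriptors
	 * for 4 complete packets. */
	printf("descriptors consumed=%d packets=%d\n", i, num_pkts);
	return 0;
}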
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 7193b7304fdd..848ea6a399f2 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -589,7 +589,8 @@ static int cosa_probe(int base, int irq, int dma)
589 chan->netdev->base_addr = chan->cosa->datareg; 589 chan->netdev->base_addr = chan->cosa->datareg;
590 chan->netdev->irq = chan->cosa->irq; 590 chan->netdev->irq = chan->cosa->irq;
591 chan->netdev->dma = chan->cosa->dma; 591 chan->netdev->dma = chan->cosa->dma;
592 if (register_hdlc_device(chan->netdev)) { 592 err = register_hdlc_device(chan->netdev);
593 if (err) {
593 netdev_warn(chan->netdev, 594 netdev_warn(chan->netdev,
594 "register_hdlc_device() failed\n"); 595 "register_hdlc_device() failed\n");
595 free_netdev(chan->netdev); 596 free_netdev(chan->netdev);
diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
index feacc3b994b7..2f0bd6955f33 100644
--- a/drivers/net/wan/z85230.c
+++ b/drivers/net/wan/z85230.c
@@ -1044,7 +1044,7 @@ EXPORT_SYMBOL(z8530_sync_dma_close);
1044 * @dev: The network device to attach 1044 * @dev: The network device to attach
1045 * @c: The Z8530 channel to configure in sync DMA mode. 1045 * @c: The Z8530 channel to configure in sync DMA mode.
1046 * 1046 *
1047 * Set up a Z85x30 device for synchronous DMA tranmission. One 1047 * Set up a Z85x30 device for synchronous DMA transmission. One
1048 * ISA DMA channel must be available for this to work. The receive 1048 * ISA DMA channel must be available for this to work. The receive
1049 * side is run in PIO mode, but then it has the bigger FIFO. 1049 * side is run in PIO mode, but then it has the bigger FIFO.
1050 */ 1050 */
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 5e15e8e10ed3..a31a6804dc34 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -279,6 +279,7 @@ static void ath9k_hw_read_revisions(struct ath_hw *ah)
279 return; 279 return;
280 case AR9300_DEVID_QCA956X: 280 case AR9300_DEVID_QCA956X:
281 ah->hw_version.macVersion = AR_SREV_VERSION_9561; 281 ah->hw_version.macVersion = AR_SREV_VERSION_9561;
282 return;
282 } 283 }
283 284
284 val = REG_READ(ah, AR_SREV) & AR_SREV_ID; 285 val = REG_READ(ah, AR_SREV) & AR_SREV_ID;
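The ath9k fix above adds a return so the QCA956X case no longer falls out of the switch into the AR_SREV register read below it. A generic illustration of that hazard, with placeholder IDs rather than the ath9k registers:

/* Fall-through model: without the early return, the later read
 * overwrites the value set in the case. Placeholder IDs only. */
#include <stdio.h>

#define DEVID_KNOWN 0x56

static int mac_version_from_register(void)
{
	return 0; /* stands in for REG_READ(ah, AR_SREV) */
}

static int read_revision(int devid, int add_return)
{
	int mac_version = -1;

	switch (devid) {
	case DEVID_KNOWN:
		mac_version = 9561;
		if (add_return)
			return mac_version;   /* the added "return;" */
		/* falls out of the switch without the fix */
	}

	mac_version = mac_version_from_register();
	return mac_version;
}

int main(void)
{
	printf("without fix: %d, with fix: %d\n",
	       read_revision(DEVID_KNOWN, 0), read_revision(DEVID_KNOWN, 1));
	return 0;
}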
diff --git a/drivers/net/wireless/b43/tables_nphy.c b/drivers/net/wireless/b43/tables_nphy.c
index 25d1cbd34306..b2f0d245bcf3 100644
--- a/drivers/net/wireless/b43/tables_nphy.c
+++ b/drivers/net/wireless/b43/tables_nphy.c
@@ -3728,7 +3728,7 @@ const u32 *b43_nphy_get_tx_gain_table(struct b43_wldev *dev)
3728 switch (phy->rev) { 3728 switch (phy->rev) {
3729 case 6: 3729 case 6:
3730 case 5: 3730 case 5:
3731 if (sprom->fem.ghz5.extpa_gain == 3) 3731 if (sprom->fem.ghz2.extpa_gain == 3)
3732 return b43_ntab_tx_gain_epa_rev3_hi_pwr_2g; 3732 return b43_ntab_tx_gain_epa_rev3_hi_pwr_2g;
3733 /* fall through */ 3733 /* fall through */
3734 case 4: 3734 case 4:
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
index d56064861a9c..d45dc021cda2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -438,6 +438,12 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
438#define RX_QUEUE_MASK 255 438#define RX_QUEUE_MASK 255
439#define RX_QUEUE_SIZE_LOG 8 439#define RX_QUEUE_SIZE_LOG 8
440 440
441/*
442 * RX related structures and functions
443 */
444#define RX_FREE_BUFFERS 64
445#define RX_LOW_WATERMARK 8
446
441/** 447/**
442 * struct iwl_rb_status - reserve buffer status 448 * struct iwl_rb_status - reserve buffer status
443 * host memory mapped FH registers 449 * host memory mapped FH registers
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
index 80fefe7d7b8c..3b8e85e51002 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
@@ -540,13 +540,11 @@ static void iwl_set_hw_address_family_8000(struct device *dev,
540 hw_addr = (const u8 *)(mac_override + 540 hw_addr = (const u8 *)(mac_override +
541 MAC_ADDRESS_OVERRIDE_FAMILY_8000); 541 MAC_ADDRESS_OVERRIDE_FAMILY_8000);
542 542
543 /* The byte order is little endian 16 bit, meaning 214365 */ 543 /*
544 data->hw_addr[0] = hw_addr[1]; 544 * Store the MAC address from MAO section.
545 data->hw_addr[1] = hw_addr[0]; 545 * No byte swapping is required in MAO section
546 data->hw_addr[2] = hw_addr[3]; 546 */
547 data->hw_addr[3] = hw_addr[2]; 547 memcpy(data->hw_addr, hw_addr, ETH_ALEN);
548 data->hw_addr[4] = hw_addr[5];
549 data->hw_addr[5] = hw_addr[4];
550 548
551 /* 549 /*
552 * Force the use of the OTP MAC address in case of reserved MAC 550 * Force the use of the OTP MAC address in case of reserved MAC
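The iwl-nvm-parse.c hunk replaces the per-16-bit byte swap (the old "214365" ordering) with a plain memcpy, since the MAO section already stores the address in the right order. A short demo of what the old swap did to a sample address:

/* Demo of the old per-16-bit swap versus a straight copy.
 * The sample address is arbitrary. */
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

int main(void)
{
	const unsigned char src[ETH_ALEN] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06 };
	unsigned char swapped[ETH_ALEN], copied[ETH_ALEN];
	int i;

	for (i = 0; i < ETH_ALEN; i += 2) {      /* old code: swap each 16-bit pair */
		swapped[i] = src[i + 1];
		swapped[i + 1] = src[i];
	}
	memcpy(copied, src, ETH_ALEN);           /* new code: no swapping */

	printf("swapped: ");
	for (i = 0; i < ETH_ALEN; i++)
		printf("%02x ", swapped[i]);     /* 02 01 04 03 06 05 */
	printf("\ncopied:  ");
	for (i = 0; i < ETH_ALEN; i++)
		printf("%02x ", copied[i]);      /* 01 02 03 04 05 06 */
	printf("\n");
	return 0;
}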
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index 5e4cbdb44c60..737774a01c74 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -660,7 +660,8 @@ struct iwl_scan_config {
660 * iwl_umac_scan_flags 660 * iwl_umac_scan_flags
661 *@IWL_UMAC_SCAN_FLAG_PREEMPTIVE: scan process triggered by this scan request 661 *@IWL_UMAC_SCAN_FLAG_PREEMPTIVE: scan process triggered by this scan request
662 * can be preempted by other scan requests with higher priority. 662 * can be preempted by other scan requests with higher priority.
663 * The low priority scan is aborted. 663 * The low priority scan will be resumed when the higher priority scan is
664 * completed.
664 *@IWL_UMAC_SCAN_FLAG_START_NOTIF: notification will be sent to the driver 665 *@IWL_UMAC_SCAN_FLAG_START_NOTIF: notification will be sent to the driver
665 * when scan starts. 666 * when scan starts.
666 */ 667 */
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 5de144968723..5514ad6d4e54 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -1023,7 +1023,7 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
1023 cmd->scan_priority = 1023 cmd->scan_priority =
1024 iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6); 1024 iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
1025 1025
1026 if (iwl_mvm_scan_total_iterations(params) == 0) 1026 if (iwl_mvm_scan_total_iterations(params) == 1)
1027 cmd->ooc_priority = 1027 cmd->ooc_priority =
1028 iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6); 1028 iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
1029 else 1029 else
@@ -1109,6 +1109,9 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1109 cmd->uid = cpu_to_le32(uid); 1109 cmd->uid = cpu_to_le32(uid);
1110 cmd->general_flags = cpu_to_le32(iwl_mvm_scan_umac_flags(mvm, params)); 1110 cmd->general_flags = cpu_to_le32(iwl_mvm_scan_umac_flags(mvm, params));
1111 1111
1112 if (type == IWL_MVM_SCAN_SCHED)
1113 cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE);
1114
1112 if (iwl_mvm_scan_use_ebs(mvm, vif, n_iterations)) 1115 if (iwl_mvm_scan_use_ebs(mvm, vif, n_iterations))
1113 cmd->channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS | 1116 cmd->channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS |
1114 IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | 1117 IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index d68dc697a4a0..26f076e82149 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -1401,6 +1401,7 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
1401 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); 1401 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
1402 u8 sta_id; 1402 u8 sta_id;
1403 int ret; 1403 int ret;
1404 static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};
1404 1405
1405 lockdep_assert_held(&mvm->mutex); 1406 lockdep_assert_held(&mvm->mutex);
1406 1407
@@ -1467,7 +1468,7 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
1467end: 1468end:
1468 IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n", 1469 IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
1469 keyconf->cipher, keyconf->keylen, keyconf->keyidx, 1470 keyconf->cipher, keyconf->keylen, keyconf->keyidx,
1470 sta->addr, ret); 1471 sta ? sta->addr : zero_addr, ret);
1471 return ret; 1472 return ret;
1472} 1473}
1473 1474
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
index d24b6a83e68c..e472729e5f14 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
@@ -86,7 +86,7 @@ void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
86{ 86{
87 lockdep_assert_held(&mvm->time_event_lock); 87 lockdep_assert_held(&mvm->time_event_lock);
88 88
89 if (te_data->id == TE_MAX) 89 if (!te_data->vif)
90 return; 90 return;
91 91
92 list_del(&te_data->list); 92 list_del(&te_data->list);
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index 7ba7a118ff5c..89116864d2a0 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -252,7 +252,7 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
252 252
253 if (info->band == IEEE80211_BAND_2GHZ && 253 if (info->band == IEEE80211_BAND_2GHZ &&
254 !iwl_mvm_bt_coex_is_shared_ant_avail(mvm)) 254 !iwl_mvm_bt_coex_is_shared_ant_avail(mvm))
255 rate_flags = BIT(mvm->cfg->non_shared_ant) << RATE_MCS_ANT_POS; 255 rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS;
256 else 256 else
257 rate_flags = 257 rate_flags =
258 BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS; 258 BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;
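The tx.c one-liner drops a redundant BIT(): cfg->non_shared_ant is already an antenna bitmask, unlike mgmt_last_antenna_idx, which is an index. The sketch below assumes the usual iwlwifi convention (ANT_A = bit 0, ANT_B = bit 1) and a RATE_MCS_ANT_POS of 14; treat both as assumptions here.

/* Model of the antenna flag fix; ANT_B and RATE_MCS_ANT_POS values are
 * assumptions, not taken from this diff. */
#include <stdio.h>

#define BIT(n)           (1U << (n))
#define ANT_B            BIT(1)          /* already a mask, value 2 */
#define RATE_MCS_ANT_POS 14

int main(void)
{
	unsigned int non_shared_ant = ANT_B;

	unsigned int buggy = BIT(non_shared_ant) << RATE_MCS_ANT_POS; /* sets bit 2: wrong antenna */
	unsigned int fixed = non_shared_ant << RATE_MCS_ANT_POS;      /* sets bit 1: antenna B */

	printf("buggy=0x%x fixed=0x%x\n", buggy, fixed);
	return 0;
}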
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 2ed1e4d2774d..9f65c1cff1b1 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -368,12 +368,14 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
368/* 3165 Series */ 368/* 3165 Series */
369 {IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)}, 369 {IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)},
370 {IWL_PCI_DEVICE(0x3165, 0x4012, iwl3165_2ac_cfg)}, 370 {IWL_PCI_DEVICE(0x3165, 0x4012, iwl3165_2ac_cfg)},
371 {IWL_PCI_DEVICE(0x3166, 0x4212, iwl3165_2ac_cfg)},
371 {IWL_PCI_DEVICE(0x3165, 0x4410, iwl3165_2ac_cfg)}, 372 {IWL_PCI_DEVICE(0x3165, 0x4410, iwl3165_2ac_cfg)},
372 {IWL_PCI_DEVICE(0x3165, 0x4510, iwl3165_2ac_cfg)}, 373 {IWL_PCI_DEVICE(0x3165, 0x4510, iwl3165_2ac_cfg)},
373 {IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)}, 374 {IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)},
374 {IWL_PCI_DEVICE(0x3166, 0x4310, iwl3165_2ac_cfg)}, 375 {IWL_PCI_DEVICE(0x3166, 0x4310, iwl3165_2ac_cfg)},
375 {IWL_PCI_DEVICE(0x3166, 0x4210, iwl3165_2ac_cfg)}, 376 {IWL_PCI_DEVICE(0x3166, 0x4210, iwl3165_2ac_cfg)},
376 {IWL_PCI_DEVICE(0x3165, 0x8010, iwl3165_2ac_cfg)}, 377 {IWL_PCI_DEVICE(0x3165, 0x8010, iwl3165_2ac_cfg)},
378 {IWL_PCI_DEVICE(0x3165, 0x8110, iwl3165_2ac_cfg)},
377 379
378/* 7265 Series */ 380/* 7265 Series */
379 {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)}, 381 {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)},
@@ -426,9 +428,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
426 {IWL_PCI_DEVICE(0x24F4, 0x1130, iwl8260_2ac_cfg)}, 428 {IWL_PCI_DEVICE(0x24F4, 0x1130, iwl8260_2ac_cfg)},
427 {IWL_PCI_DEVICE(0x24F4, 0x1030, iwl8260_2ac_cfg)}, 429 {IWL_PCI_DEVICE(0x24F4, 0x1030, iwl8260_2ac_cfg)},
428 {IWL_PCI_DEVICE(0x24F3, 0xC010, iwl8260_2ac_cfg)}, 430 {IWL_PCI_DEVICE(0x24F3, 0xC010, iwl8260_2ac_cfg)},
431 {IWL_PCI_DEVICE(0x24F3, 0xC110, iwl8260_2ac_cfg)},
429 {IWL_PCI_DEVICE(0x24F3, 0xD010, iwl8260_2ac_cfg)}, 432 {IWL_PCI_DEVICE(0x24F3, 0xD010, iwl8260_2ac_cfg)},
430 {IWL_PCI_DEVICE(0x24F4, 0xC030, iwl8260_2ac_cfg)},
431 {IWL_PCI_DEVICE(0x24F4, 0xD030, iwl8260_2ac_cfg)},
432 {IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8260_2ac_cfg)}, 433 {IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8260_2ac_cfg)},
433 {IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8260_2ac_cfg)}, 434 {IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8260_2ac_cfg)},
434 {IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8260_2ac_cfg)}, 435 {IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8260_2ac_cfg)},
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index 31f72a61cc3f..376b84e54ad7 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -44,15 +44,6 @@
44#include "iwl-io.h" 44#include "iwl-io.h"
45#include "iwl-op-mode.h" 45#include "iwl-op-mode.h"
46 46
47/*
48 * RX related structures and functions
49 */
50#define RX_NUM_QUEUES 1
51#define RX_POST_REQ_ALLOC 2
52#define RX_CLAIM_REQ_ALLOC 8
53#define RX_POOL_SIZE ((RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC) * RX_NUM_QUEUES)
54#define RX_LOW_WATERMARK 8
55
56struct iwl_host_cmd; 47struct iwl_host_cmd;
57 48
58/*This file includes the declaration that are internal to the 49/*This file includes the declaration that are internal to the
@@ -86,29 +77,29 @@ struct isr_statistics {
86 * struct iwl_rxq - Rx queue 77 * struct iwl_rxq - Rx queue
87 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd) 78 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
88 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd) 79 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
80 * @pool:
81 * @queue:
89 * @read: Shared index to newest available Rx buffer 82 * @read: Shared index to newest available Rx buffer
90 * @write: Shared index to oldest written Rx packet 83 * @write: Shared index to oldest written Rx packet
91 * @free_count: Number of pre-allocated buffers in rx_free 84 * @free_count: Number of pre-allocated buffers in rx_free
92 * @used_count: Number of RBDs handled to allocator to use for allocation
93 * @write_actual: 85 * @write_actual:
94 * @rx_free: list of RBDs with allocated RB ready for use 86 * @rx_free: list of free SKBs for use
95 * @rx_used: list of RBDs with no RB attached 87 * @rx_used: List of Rx buffers with no SKB
96 * @need_update: flag to indicate we need to update read/write index 88 * @need_update: flag to indicate we need to update read/write index
97 * @rb_stts: driver's pointer to receive buffer status 89 * @rb_stts: driver's pointer to receive buffer status
98 * @rb_stts_dma: bus address of receive buffer status 90 * @rb_stts_dma: bus address of receive buffer status
99 * @lock: 91 * @lock:
100 * @pool: initial pool of iwl_rx_mem_buffer for the queue
101 * @queue: actual rx queue
102 * 92 *
103 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers 93 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
104 */ 94 */
105struct iwl_rxq { 95struct iwl_rxq {
106 __le32 *bd; 96 __le32 *bd;
107 dma_addr_t bd_dma; 97 dma_addr_t bd_dma;
98 struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
99 struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
108 u32 read; 100 u32 read;
109 u32 write; 101 u32 write;
110 u32 free_count; 102 u32 free_count;
111 u32 used_count;
112 u32 write_actual; 103 u32 write_actual;
113 struct list_head rx_free; 104 struct list_head rx_free;
114 struct list_head rx_used; 105 struct list_head rx_used;
@@ -116,32 +107,6 @@ struct iwl_rxq {
116 struct iwl_rb_status *rb_stts; 107 struct iwl_rb_status *rb_stts;
117 dma_addr_t rb_stts_dma; 108 dma_addr_t rb_stts_dma;
118 spinlock_t lock; 109 spinlock_t lock;
119 struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE];
120 struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
121};
122
123/**
124 * struct iwl_rb_allocator - Rx allocator
125 * @pool: initial pool of allocator
126 * @req_pending: number of requests the allocator had not processed yet
127 * @req_ready: number of requests honored and ready for claiming
128 * @rbd_allocated: RBDs with pages allocated and ready to be handled to
129 * the queue. This is a list of &struct iwl_rx_mem_buffer
130 * @rbd_empty: RBDs with no page attached for allocator use. This is a list
131 * of &struct iwl_rx_mem_buffer
132 * @lock: protects the rbd_allocated and rbd_empty lists
133 * @alloc_wq: work queue for background calls
134 * @rx_alloc: work struct for background calls
135 */
136struct iwl_rb_allocator {
137 struct iwl_rx_mem_buffer pool[RX_POOL_SIZE];
138 atomic_t req_pending;
139 atomic_t req_ready;
140 struct list_head rbd_allocated;
141 struct list_head rbd_empty;
142 spinlock_t lock;
143 struct workqueue_struct *alloc_wq;
144 struct work_struct rx_alloc;
145}; 110};
146 111
147struct iwl_dma_ptr { 112struct iwl_dma_ptr {
@@ -285,7 +250,7 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
285/** 250/**
286 * struct iwl_trans_pcie - PCIe transport specific data 251 * struct iwl_trans_pcie - PCIe transport specific data
287 * @rxq: all the RX queue data 252 * @rxq: all the RX queue data
288 * @rba: allocator for RX replenishing 253 * @rx_replenish: work that will be called when buffers need to be allocated
289 * @drv - pointer to iwl_drv 254 * @drv - pointer to iwl_drv
290 * @trans: pointer to the generic transport area 255 * @trans: pointer to the generic transport area
291 * @scd_base_addr: scheduler sram base address in SRAM 256 * @scd_base_addr: scheduler sram base address in SRAM
@@ -308,7 +273,7 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
308 */ 273 */
309struct iwl_trans_pcie { 274struct iwl_trans_pcie {
310 struct iwl_rxq rxq; 275 struct iwl_rxq rxq;
311 struct iwl_rb_allocator rba; 276 struct work_struct rx_replenish;
312 struct iwl_trans *trans; 277 struct iwl_trans *trans;
313 struct iwl_drv *drv; 278 struct iwl_drv *drv;
314 279
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index a3fbaa0ef5e0..adad8d0fae7f 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -1,7 +1,7 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
4 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 4 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
5 * 5 *
6 * Portions of this file are derived from the ipw3945 project, as well 6 * Portions of this file are derived from the ipw3945 project, as well
7 * as portions of the ieee80211 subsystem header files. 7 * as portions of the ieee80211 subsystem header files.
@@ -74,29 +74,16 @@
74 * resets the Rx queue buffers with new memory. 74 * resets the Rx queue buffers with new memory.
75 * 75 *
76 * The management in the driver is as follows: 76 * The management in the driver is as follows:
77 * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free. 77 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
78 * When the interrupt handler is called, the request is processed. 78 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
79 * The page is either stolen - transferred to the upper layer 79 * to replenish the iwl->rxq->rx_free.
80 * or reused - added immediately to the iwl->rxq->rx_free list. 80 * + In iwl_pcie_rx_replenish (scheduled) if 'processed' != 'read' then the
81 * + When the page is stolen - the driver updates the matching queue's used 81 * iwl->rxq is replenished and the READ INDEX is updated (updating the
82 * count, detaches the RBD and transfers it to the queue used list. 82 * 'processed' and 'read' driver indexes as well)
83 * When there are two used RBDs - they are transferred to the allocator empty
84 * list. Work is then scheduled for the allocator to start allocating
85 * eight buffers.
86 * When there are another 6 used RBDs - they are transferred to the allocator
87 * empty list and the driver tries to claim the pre-allocated buffers and
88 * add them to iwl->rxq->rx_free. If it fails - it continues to claim them
89 * until ready.
90 * When there are 8+ buffers in the free list - either from allocation or from
91 * 8 reused unstolen pages - restock is called to update the FW and indexes.
92 * + In order to make sure the allocator always has RBDs to use for allocation
93 * the allocator has initial pool in the size of num_queues*(8-2) - the
94 * maximum missing RBDs per allocation request (request posted with 2
95 * empty RBDs, there is no guarantee when the other 6 RBDs are supplied).
96 * The queues supplies the recycle of the rest of the RBDs.
97 * + A received packet is processed and handed to the kernel network stack, 83 * + A received packet is processed and handed to the kernel network stack,
98 * detached from the iwl->rxq. The driver 'processed' index is updated. 84 * detached from the iwl->rxq. The driver 'processed' index is updated.
99 * + If there are no allocated buffers in iwl->rxq->rx_free, 85 * + The Host/Firmware iwl->rxq is replenished at irq thread time from the
86 * rx_free list. If there are no allocated buffers in iwl->rxq->rx_free,
100 * the READ INDEX is not incremented and iwl->status(RX_STALLED) is set. 87 * the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
101 * If there were enough free buffers and RX_STALLED is set it is cleared. 88 * If there were enough free buffers and RX_STALLED is set it is cleared.
102 * 89 *
@@ -105,32 +92,18 @@
105 * 92 *
106 * iwl_rxq_alloc() Allocates rx_free 93 * iwl_rxq_alloc() Allocates rx_free
107 * iwl_pcie_rx_replenish() Replenishes rx_free list from rx_used, and calls 94 * iwl_pcie_rx_replenish() Replenishes rx_free list from rx_used, and calls
108 * iwl_pcie_rxq_restock. 95 * iwl_pcie_rxq_restock
109 * Used only during initialization.
110 * iwl_pcie_rxq_restock() Moves available buffers from rx_free into Rx 96 * iwl_pcie_rxq_restock() Moves available buffers from rx_free into Rx
111 * queue, updates firmware pointers, and updates 97 * queue, updates firmware pointers, and updates
112 * the WRITE index. 98 * the WRITE index. If insufficient rx_free buffers
113 * iwl_pcie_rx_allocator() Background work for allocating pages. 99 * are available, schedules iwl_pcie_rx_replenish
114 * 100 *
115 * -- enable interrupts -- 101 * -- enable interrupts --
116 * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the 102 * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
117 * READ INDEX, detaching the SKB from the pool. 103 * READ INDEX, detaching the SKB from the pool.
118 * Moves the packet buffer from queue to rx_used. 104 * Moves the packet buffer from queue to rx_used.
119 * Posts and claims requests to the allocator.
120 * Calls iwl_pcie_rxq_restock to refill any empty 105 * Calls iwl_pcie_rxq_restock to refill any empty
121 * slots. 106 * slots.
122 *
123 * RBD life-cycle:
124 *
125 * Init:
126 * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
127 *
128 * Regular Receive interrupt:
129 * Page Stolen:
130 * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
131 * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
132 * Page not Stolen:
133 * rxq.queue -> rxq.rx_free -> rxq.queue
134 * ... 107 * ...
135 * 108 *
136 */ 109 */
@@ -267,6 +240,10 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
267 rxq->free_count--; 240 rxq->free_count--;
268 } 241 }
269 spin_unlock(&rxq->lock); 242 spin_unlock(&rxq->lock);
243 /* If the pre-allocated buffer pool is dropping low, schedule to
244 * refill it */
245 if (rxq->free_count <= RX_LOW_WATERMARK)
246 schedule_work(&trans_pcie->rx_replenish);
270 247
271 /* If we've added more space for the firmware to place data, tell it. 248 /* If we've added more space for the firmware to place data, tell it.
272 * Increment device's write pointer in multiples of 8. */ 249 * Increment device's write pointer in multiples of 8. */
@@ -278,44 +255,6 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
278} 255}
279 256
280/* 257/*
281 * iwl_pcie_rx_alloc_page - allocates and returns a page.
282 *
283 */
284static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans)
285{
286 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
287 struct iwl_rxq *rxq = &trans_pcie->rxq;
288 struct page *page;
289 gfp_t gfp_mask = GFP_KERNEL;
290
291 if (rxq->free_count > RX_LOW_WATERMARK)
292 gfp_mask |= __GFP_NOWARN;
293
294 if (trans_pcie->rx_page_order > 0)
295 gfp_mask |= __GFP_COMP;
296
297 /* Alloc a new receive buffer */
298 page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
299 if (!page) {
300 if (net_ratelimit())
301 IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
302 trans_pcie->rx_page_order);
303 /* Issue an error if the hardware has consumed more than half
304 * of its free buffer list and we don't have enough
305 * pre-allocated buffers.
306 */
307 if (rxq->free_count <= RX_LOW_WATERMARK &&
308 iwl_rxq_space(rxq) > (RX_QUEUE_SIZE / 2) &&
309 net_ratelimit())
310 IWL_CRIT(trans,
311 "Failed to alloc_pages with GFP_KERNEL. Only %u free buffers remaining.\n",
312 rxq->free_count);
313 return NULL;
314 }
315 return page;
316}
317
318/*
319 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD 258 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
320 * 259 *
321 * A used RBD is an Rx buffer that has been given to the stack. To use it again 260 * A used RBD is an Rx buffer that has been given to the stack. To use it again
@@ -324,12 +263,13 @@ static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans)
324 * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly 263 * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
325 * allocated buffers. 264 * allocated buffers.
326 */ 265 */
327static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans) 266static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
328{ 267{
329 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 268 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
330 struct iwl_rxq *rxq = &trans_pcie->rxq; 269 struct iwl_rxq *rxq = &trans_pcie->rxq;
331 struct iwl_rx_mem_buffer *rxb; 270 struct iwl_rx_mem_buffer *rxb;
332 struct page *page; 271 struct page *page;
272 gfp_t gfp_mask = priority;
333 273
334 while (1) { 274 while (1) {
335 spin_lock(&rxq->lock); 275 spin_lock(&rxq->lock);
@@ -339,10 +279,32 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans)
339 } 279 }
340 spin_unlock(&rxq->lock); 280 spin_unlock(&rxq->lock);
341 281
282 if (rxq->free_count > RX_LOW_WATERMARK)
283 gfp_mask |= __GFP_NOWARN;
284
285 if (trans_pcie->rx_page_order > 0)
286 gfp_mask |= __GFP_COMP;
287
342 /* Alloc a new receive buffer */ 288 /* Alloc a new receive buffer */
343 page = iwl_pcie_rx_alloc_page(trans); 289 page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
344 if (!page) 290 if (!page) {
291 if (net_ratelimit())
292 IWL_DEBUG_INFO(trans, "alloc_pages failed, "
293 "order: %d\n",
294 trans_pcie->rx_page_order);
295
296 if ((rxq->free_count <= RX_LOW_WATERMARK) &&
297 net_ratelimit())
298 IWL_CRIT(trans, "Failed to alloc_pages with %s."
299 "Only %u free buffers remaining.\n",
300 priority == GFP_ATOMIC ?
301 "GFP_ATOMIC" : "GFP_KERNEL",
302 rxq->free_count);
303 /* We don't reschedule replenish work here -- we will
304 * call the restock method and if it still needs
305 * more buffers it will schedule replenish */
345 return; 306 return;
307 }
346 308
347 spin_lock(&rxq->lock); 309 spin_lock(&rxq->lock);
348 310
@@ -393,7 +355,7 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
393 355
394 lockdep_assert_held(&rxq->lock); 356 lockdep_assert_held(&rxq->lock);
395 357
396 for (i = 0; i < RX_QUEUE_SIZE; i++) { 358 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
397 if (!rxq->pool[i].page) 359 if (!rxq->pool[i].page)
398 continue; 360 continue;
399 dma_unmap_page(trans->dev, rxq->pool[i].page_dma, 361 dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
@@ -410,144 +372,32 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
410 * When moving to rx_free a page is allocated for the slot. 372 *
411 * 373 *
412 * Also restock the Rx queue via iwl_pcie_rxq_restock. 374 * Also restock the Rx queue via iwl_pcie_rxq_restock.
413 * This is called only during initialization 375 * This is called as a scheduled work item (except for during initialization)
414 */ 376 */
415static void iwl_pcie_rx_replenish(struct iwl_trans *trans) 377static void iwl_pcie_rx_replenish(struct iwl_trans *trans, gfp_t gfp)
416{ 378{
417 iwl_pcie_rxq_alloc_rbs(trans); 379 iwl_pcie_rxq_alloc_rbs(trans, gfp);
418 380
419 iwl_pcie_rxq_restock(trans); 381 iwl_pcie_rxq_restock(trans);
420} 382}
421 383
422/* 384static void iwl_pcie_rx_replenish_work(struct work_struct *data)
423 * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
424 *
425 * Allocates for each received request 8 pages
426 * Called as a scheduled work item.
427 */
428static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
429{
430 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
431 struct iwl_rb_allocator *rba = &trans_pcie->rba;
432
433 while (atomic_read(&rba->req_pending)) {
434 int i;
435 struct list_head local_empty;
436 struct list_head local_allocated;
437
438 INIT_LIST_HEAD(&local_allocated);
439 spin_lock(&rba->lock);
440 /* swap out the entire rba->rbd_empty to a local list */
441 list_replace_init(&rba->rbd_empty, &local_empty);
442 spin_unlock(&rba->lock);
443
444 for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
445 struct iwl_rx_mem_buffer *rxb;
446 struct page *page;
447
448 /* List should never be empty - each reused RBD is
449 * returned to the list, and initial pool covers any
450 * possible gap between the time the page is allocated
451 * to the time the RBD is added.
452 */
453 BUG_ON(list_empty(&local_empty));
454 /* Get the first rxb from the rbd list */
455 rxb = list_first_entry(&local_empty,
456 struct iwl_rx_mem_buffer, list);
457 BUG_ON(rxb->page);
458
459 /* Alloc a new receive buffer */
460 page = iwl_pcie_rx_alloc_page(trans);
461 if (!page)
462 continue;
463 rxb->page = page;
464
465 /* Get physical address of the RB */
466 rxb->page_dma = dma_map_page(trans->dev, page, 0,
467 PAGE_SIZE << trans_pcie->rx_page_order,
468 DMA_FROM_DEVICE);
469 if (dma_mapping_error(trans->dev, rxb->page_dma)) {
470 rxb->page = NULL;
471 __free_pages(page, trans_pcie->rx_page_order);
472 continue;
473 }
474 /* dma address must be no more than 36 bits */
475 BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
476 /* and also 256 byte aligned! */
477 BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
478
479 /* move the allocated entry to the out list */
480 list_move(&rxb->list, &local_allocated);
481 i++;
482 }
483
484 spin_lock(&rba->lock);
485 /* add the allocated rbds to the allocator allocated list */
486 list_splice_tail(&local_allocated, &rba->rbd_allocated);
487 /* add the unused rbds back to the allocator empty list */
488 list_splice_tail(&local_empty, &rba->rbd_empty);
489 spin_unlock(&rba->lock);
490
491 atomic_dec(&rba->req_pending);
492 atomic_inc(&rba->req_ready);
493 }
494}
495
496/*
497 * iwl_pcie_rx_allocator_get - Returns the pre-allocated pages
498 *
499 * Called by queue when the queue posted allocation request and
500 * has freed 8 RBDs in order to restock itself.
501 */
502static int iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
503 struct iwl_rx_mem_buffer
504 *out[RX_CLAIM_REQ_ALLOC])
505{
506 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
507 struct iwl_rb_allocator *rba = &trans_pcie->rba;
508 int i;
509
510 if (atomic_dec_return(&rba->req_ready) < 0) {
511 atomic_inc(&rba->req_ready);
512 IWL_DEBUG_RX(trans,
513 "Allocation request not ready, pending requests = %d\n",
514 atomic_read(&rba->req_pending));
515 return -ENOMEM;
516 }
517
518 spin_lock(&rba->lock);
519 for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
520 /* Get next free Rx buffer, remove it from free list */
521 out[i] = list_first_entry(&rba->rbd_allocated,
522 struct iwl_rx_mem_buffer, list);
523 list_del(&out[i]->list);
524 }
525 spin_unlock(&rba->lock);
526
527 return 0;
528}
529
530static void iwl_pcie_rx_allocator_work(struct work_struct *data)
531{ 385{
532 struct iwl_rb_allocator *rba_p =
533 container_of(data, struct iwl_rb_allocator, rx_alloc);
534 struct iwl_trans_pcie *trans_pcie = 386 struct iwl_trans_pcie *trans_pcie =
535 container_of(rba_p, struct iwl_trans_pcie, rba); 387 container_of(data, struct iwl_trans_pcie, rx_replenish);
536 388
537 iwl_pcie_rx_allocator(trans_pcie->trans); 389 iwl_pcie_rx_replenish(trans_pcie->trans, GFP_KERNEL);
538} 390}
539 391
540static int iwl_pcie_rx_alloc(struct iwl_trans *trans) 392static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
541{ 393{
542 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 394 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
543 struct iwl_rxq *rxq = &trans_pcie->rxq; 395 struct iwl_rxq *rxq = &trans_pcie->rxq;
544 struct iwl_rb_allocator *rba = &trans_pcie->rba;
545 struct device *dev = trans->dev; 396 struct device *dev = trans->dev;
546 397
547 memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq)); 398 memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));
548 399
549 spin_lock_init(&rxq->lock); 400 spin_lock_init(&rxq->lock);
550 spin_lock_init(&rba->lock);
551 401
552 if (WARN_ON(rxq->bd || rxq->rb_stts)) 402 if (WARN_ON(rxq->bd || rxq->rb_stts))
553 return -EINVAL; 403 return -EINVAL;
@@ -637,49 +487,15 @@ static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
637 INIT_LIST_HEAD(&rxq->rx_free); 487 INIT_LIST_HEAD(&rxq->rx_free);
638 INIT_LIST_HEAD(&rxq->rx_used); 488 INIT_LIST_HEAD(&rxq->rx_used);
639 rxq->free_count = 0; 489 rxq->free_count = 0;
640 rxq->used_count = 0;
641 490
642 for (i = 0; i < RX_QUEUE_SIZE; i++) 491 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
643 list_add(&rxq->pool[i].list, &rxq->rx_used); 492 list_add(&rxq->pool[i].list, &rxq->rx_used);
644} 493}
645 494
646static void iwl_pcie_rx_init_rba(struct iwl_rb_allocator *rba)
647{
648 int i;
649
650 lockdep_assert_held(&rba->lock);
651
652 INIT_LIST_HEAD(&rba->rbd_allocated);
653 INIT_LIST_HEAD(&rba->rbd_empty);
654
655 for (i = 0; i < RX_POOL_SIZE; i++)
656 list_add(&rba->pool[i].list, &rba->rbd_empty);
657}
658
659static void iwl_pcie_rx_free_rba(struct iwl_trans *trans)
660{
661 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
662 struct iwl_rb_allocator *rba = &trans_pcie->rba;
663 int i;
664
665 lockdep_assert_held(&rba->lock);
666
667 for (i = 0; i < RX_POOL_SIZE; i++) {
668 if (!rba->pool[i].page)
669 continue;
670 dma_unmap_page(trans->dev, rba->pool[i].page_dma,
671 PAGE_SIZE << trans_pcie->rx_page_order,
672 DMA_FROM_DEVICE);
673 __free_pages(rba->pool[i].page, trans_pcie->rx_page_order);
674 rba->pool[i].page = NULL;
675 }
676}
677
678int iwl_pcie_rx_init(struct iwl_trans *trans) 495int iwl_pcie_rx_init(struct iwl_trans *trans)
679{ 496{
680 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 497 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
681 struct iwl_rxq *rxq = &trans_pcie->rxq; 498 struct iwl_rxq *rxq = &trans_pcie->rxq;
682 struct iwl_rb_allocator *rba = &trans_pcie->rba;
683 int i, err; 499 int i, err;
684 500
685 if (!rxq->bd) { 501 if (!rxq->bd) {
@@ -687,21 +503,11 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
687 if (err) 503 if (err)
688 return err; 504 return err;
689 } 505 }
690 if (!rba->alloc_wq)
691 rba->alloc_wq = alloc_workqueue("rb_allocator",
692 WQ_HIGHPRI | WQ_UNBOUND, 1);
693 INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work);
694
695 spin_lock(&rba->lock);
696 atomic_set(&rba->req_pending, 0);
697 atomic_set(&rba->req_ready, 0);
698 /* free all first - we might be reconfigured for a different size */
699 iwl_pcie_rx_free_rba(trans);
700 iwl_pcie_rx_init_rba(rba);
701 spin_unlock(&rba->lock);
702 506
703 spin_lock(&rxq->lock); 507 spin_lock(&rxq->lock);
704 508
509 INIT_WORK(&trans_pcie->rx_replenish, iwl_pcie_rx_replenish_work);
510
705 /* free all first - we might be reconfigured for a different size */ 511 /* free all first - we might be reconfigured for a different size */
706 iwl_pcie_rxq_free_rbs(trans); 512 iwl_pcie_rxq_free_rbs(trans);
707 iwl_pcie_rx_init_rxb_lists(rxq); 513 iwl_pcie_rx_init_rxb_lists(rxq);
@@ -716,7 +522,7 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
716 memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts)); 522 memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
717 spin_unlock(&rxq->lock); 523 spin_unlock(&rxq->lock);
718 524
719 iwl_pcie_rx_replenish(trans); 525 iwl_pcie_rx_replenish(trans, GFP_KERNEL);
720 526
721 iwl_pcie_rx_hw_init(trans, rxq); 527 iwl_pcie_rx_hw_init(trans, rxq);
722 528
@@ -731,7 +537,6 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
731{ 537{
732 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 538 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
733 struct iwl_rxq *rxq = &trans_pcie->rxq; 539 struct iwl_rxq *rxq = &trans_pcie->rxq;
734 struct iwl_rb_allocator *rba = &trans_pcie->rba;
735 540
736 /*if rxq->bd is NULL, it means that nothing has been allocated, 541 /*if rxq->bd is NULL, it means that nothing has been allocated,
737 * exit now */ 542 * exit now */
@@ -740,15 +545,7 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
740 return; 545 return;
741 } 546 }
742 547
743 cancel_work_sync(&rba->rx_alloc); 548 cancel_work_sync(&trans_pcie->rx_replenish);
744 if (rba->alloc_wq) {
745 destroy_workqueue(rba->alloc_wq);
746 rba->alloc_wq = NULL;
747 }
748
749 spin_lock(&rba->lock);
750 iwl_pcie_rx_free_rba(trans);
751 spin_unlock(&rba->lock);
752 549
753 spin_lock(&rxq->lock); 550 spin_lock(&rxq->lock);
754 iwl_pcie_rxq_free_rbs(trans); 551 iwl_pcie_rxq_free_rbs(trans);
@@ -769,43 +566,6 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
769 rxq->rb_stts = NULL; 566 rxq->rb_stts = NULL;
770} 567}
771 568
772/*
773 * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
774 *
775 * Called when a RBD can be reused. The RBD is transferred to the allocator.
776 * When there are 2 empty RBDs - a request for allocation is posted
777 */
778static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
779 struct iwl_rx_mem_buffer *rxb,
780 struct iwl_rxq *rxq)
781{
782 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
783 struct iwl_rb_allocator *rba = &trans_pcie->rba;
784
785 /* Count the used RBDs */
786 rxq->used_count++;
787
788 /* Move the RBD to the used list, will be moved to allocator in batches
789 * before claiming or posting a request*/
790 list_add_tail(&rxb->list, &rxq->rx_used);
791
792 /* If we have RX_POST_REQ_ALLOC new released rx buffers -
793 * issue a request for allocator. Modulo RX_CLAIM_REQ_ALLOC is
794 * used for the case we failed to claim RX_CLAIM_REQ_ALLOC,
795 * after but we still need to post another request.
796 */
797 if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
798 /* Move the 2 RBDs to the allocator ownership.
799 Allocator has another 6 from pool for the request completion*/
800 spin_lock(&rba->lock);
801 list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
802 spin_unlock(&rba->lock);
803
804 atomic_inc(&rba->req_pending);
805 queue_work(rba->alloc_wq, &rba->rx_alloc);
806 }
807}
808
809static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, 569static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
810 struct iwl_rx_mem_buffer *rxb) 570 struct iwl_rx_mem_buffer *rxb)
811{ 571{
@@ -928,13 +688,13 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
928 */ 688 */
929 __free_pages(rxb->page, trans_pcie->rx_page_order); 689 __free_pages(rxb->page, trans_pcie->rx_page_order);
930 rxb->page = NULL; 690 rxb->page = NULL;
931 iwl_pcie_rx_reuse_rbd(trans, rxb, rxq); 691 list_add_tail(&rxb->list, &rxq->rx_used);
932 } else { 692 } else {
933 list_add_tail(&rxb->list, &rxq->rx_free); 693 list_add_tail(&rxb->list, &rxq->rx_free);
934 rxq->free_count++; 694 rxq->free_count++;
935 } 695 }
936 } else 696 } else
937 iwl_pcie_rx_reuse_rbd(trans, rxb, rxq); 697 list_add_tail(&rxb->list, &rxq->rx_used);
938} 698}
939 699
940/* 700/*
@@ -944,7 +704,10 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans)
944{ 704{
945 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 705 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
946 struct iwl_rxq *rxq = &trans_pcie->rxq; 706 struct iwl_rxq *rxq = &trans_pcie->rxq;
947 u32 r, i, j; 707 u32 r, i;
708 u8 fill_rx = 0;
709 u32 count = 8;
710 int total_empty;
948 711
949restart: 712restart:
950 spin_lock(&rxq->lock); 713 spin_lock(&rxq->lock);
@@ -957,6 +720,14 @@ restart:
957 if (i == r) 720 if (i == r)
958 IWL_DEBUG_RX(trans, "HW = SW = %d\n", r); 721 IWL_DEBUG_RX(trans, "HW = SW = %d\n", r);
959 722
723 /* calculate total frames need to be restock after handling RX */
724 total_empty = r - rxq->write_actual;
725 if (total_empty < 0)
726 total_empty += RX_QUEUE_SIZE;
727
728 if (total_empty > (RX_QUEUE_SIZE / 2))
729 fill_rx = 1;
730
960 while (i != r) { 731 while (i != r) {
961 struct iwl_rx_mem_buffer *rxb; 732 struct iwl_rx_mem_buffer *rxb;
962 733
@@ -968,48 +739,29 @@ restart:
968 iwl_pcie_rx_handle_rb(trans, rxb); 739 iwl_pcie_rx_handle_rb(trans, rxb);
969 740
970 i = (i + 1) & RX_QUEUE_MASK; 741 i = (i + 1) & RX_QUEUE_MASK;
971 742 /* If there are a lot of unused frames,
972 /* If we have RX_CLAIM_REQ_ALLOC released rx buffers - 743 * restock the Rx queue so ucode wont assert. */
973 * try to claim the pre-allocated buffers from the allocator */ 744 if (fill_rx) {
974 if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) { 745 count++;
975 struct iwl_rb_allocator *rba = &trans_pcie->rba; 746 if (count >= 8) {
976 struct iwl_rx_mem_buffer *out[RX_CLAIM_REQ_ALLOC]; 747 rxq->read = i;
977 748 spin_unlock(&rxq->lock);
978 /* Add the remaining 6 empty RBDs for allocator use */ 749 iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
979 spin_lock(&rba->lock); 750 count = 0;
980 list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty); 751 goto restart;
981 spin_unlock(&rba->lock);
982
983 /* If not ready - continue, will try to reclaim later.
984 * No need to reschedule work - allocator exits only on
985 * success */
986 if (!iwl_pcie_rx_allocator_get(trans, out)) {
987 /* If success - then RX_CLAIM_REQ_ALLOC
988 * buffers were retrieved and should be added
989 * to free list */
990 rxq->used_count -= RX_CLAIM_REQ_ALLOC;
991 for (j = 0; j < RX_CLAIM_REQ_ALLOC; j++) {
992 list_add_tail(&out[j]->list,
993 &rxq->rx_free);
994 rxq->free_count++;
995 }
996 } 752 }
997 } 753 }
998 /* handle restock for two cases:
999 * - we just pulled buffers from the allocator
1000 * - we have 8+ unstolen pages accumulated */
1001 if (rxq->free_count >= RX_CLAIM_REQ_ALLOC) {
1002 rxq->read = i;
1003 spin_unlock(&rxq->lock);
1004 iwl_pcie_rxq_restock(trans);
1005 goto restart;
1006 }
1007 } 754 }
1008 755
1009 /* Backtrack one entry */ 756 /* Backtrack one entry */
1010 rxq->read = i; 757 rxq->read = i;
1011 spin_unlock(&rxq->lock); 758 spin_unlock(&rxq->lock);
1012 759
760 if (fill_rx)
761 iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
762 else
763 iwl_pcie_rxq_restock(trans);
764
1013 if (trans_pcie->napi.poll) 765 if (trans_pcie->napi.poll)
1014 napi_gro_flush(&trans_pcie->napi, false); 766 napi_gro_flush(&trans_pcie->napi, false);
1015} 767}
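The restored rx.c path decides when to refill from the gap between the hardware read pointer and write_actual, modulo the queue size, and replenishes when that gap exceeds half the queue or free_count falls to the watermark. A small model of that arithmetic; the queue size of 256 follows from the RX_QUEUE_MASK of 255 shown earlier, and RX_LOW_WATERMARK 8 comes from the iwl-fh.h hunk above.

/* Model of the restock/replenish decision in the restored rx path. */
#include <stdio.h>

#define RX_QUEUE_SIZE    256
#define RX_LOW_WATERMARK 8

static int frames_to_restock(int r, int write_actual)
{
	int total_empty = r - write_actual;

	if (total_empty < 0)                 /* read pointer wrapped around */
		total_empty += RX_QUEUE_SIZE;
	return total_empty;
}

int main(void)
{
	int total_empty = frames_to_restock(10, 120);   /* wrapped case: 146 */
	int fill_rx = total_empty > (RX_QUEUE_SIZE / 2);
	int free_count = 6;

	printf("total_empty=%d fill_rx=%d schedule_replenish=%d\n",
	       total_empty, fill_rx, free_count <= RX_LOW_WATERMARK);
	return 0;
}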
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 43ae658af6ec..9e144e71da0b 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -182,7 +182,7 @@ static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
182 182
183static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux) 183static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
184{ 184{
185 if (!trans->cfg->apmg_not_supported) 185 if (trans->cfg->apmg_not_supported)
186 return; 186 return;
187 187
188 if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold)) 188 if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
@@ -478,10 +478,16 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
478 if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) 478 if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000)
479 iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG, 479 iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
480 APMG_PCIDEV_STT_VAL_WAKE_ME); 480 APMG_PCIDEV_STT_VAL_WAKE_ME);
481 else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) 481 else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
482 iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
483 CSR_RESET_LINK_PWR_MGMT_DISABLED);
482 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, 484 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
483 CSR_HW_IF_CONFIG_REG_PREPARE | 485 CSR_HW_IF_CONFIG_REG_PREPARE |
484 CSR_HW_IF_CONFIG_REG_ENABLE_PME); 486 CSR_HW_IF_CONFIG_REG_ENABLE_PME);
487 mdelay(1);
488 iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
489 CSR_RESET_LINK_PWR_MGMT_DISABLED);
490 }
485 mdelay(5); 491 mdelay(5);
486 } 492 }
487 493
@@ -575,6 +581,10 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
575 if (ret >= 0) 581 if (ret >= 0)
576 return 0; 582 return 0;
577 583
584 iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
585 CSR_RESET_LINK_PWR_MGMT_DISABLED);
586 msleep(1);
587
578 for (iter = 0; iter < 10; iter++) { 588 for (iter = 0; iter < 10; iter++) {
579 /* If HW is not ready, prepare the conditions to check again */ 589 /* If HW is not ready, prepare the conditions to check again */
580 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, 590 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
@@ -582,8 +592,10 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
582 592
583 do { 593 do {
584 ret = iwl_pcie_set_hw_ready(trans); 594 ret = iwl_pcie_set_hw_ready(trans);
585 if (ret >= 0) 595 if (ret >= 0) {
586 return 0; 596 ret = 0;
597 goto out;
598 }
587 599
588 usleep_range(200, 1000); 600 usleep_range(200, 1000);
589 t += 200; 601 t += 200;
@@ -593,6 +605,10 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
593 605
594 IWL_ERR(trans, "Couldn't prepare the card\n"); 606 IWL_ERR(trans, "Couldn't prepare the card\n");
595 607
608out:
609 iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
610 CSR_RESET_LINK_PWR_MGMT_DISABLED);
611
596 return ret; 612 return ret;
597} 613}
598 614
@@ -2459,7 +2475,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
2459 struct iwl_trans_pcie *trans_pcie; 2475 struct iwl_trans_pcie *trans_pcie;
2460 struct iwl_trans *trans; 2476 struct iwl_trans *trans;
2461 u16 pci_cmd; 2477 u16 pci_cmd;
2462 int err; 2478 int ret;
2463 2479
2464 trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), 2480 trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
2465 &pdev->dev, cfg, &trans_ops_pcie, 0); 2481 &pdev->dev, cfg, &trans_ops_pcie, 0);
@@ -2474,8 +2490,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
2474 spin_lock_init(&trans_pcie->ref_lock); 2490 spin_lock_init(&trans_pcie->ref_lock);
2475 init_waitqueue_head(&trans_pcie->ucode_write_waitq); 2491 init_waitqueue_head(&trans_pcie->ucode_write_waitq);
2476 2492
2477 err = pci_enable_device(pdev); 2493 ret = pci_enable_device(pdev);
2478 if (err) 2494 if (ret)
2479 goto out_no_pci; 2495 goto out_no_pci;
2480 2496
2481 if (!cfg->base_params->pcie_l1_allowed) { 2497 if (!cfg->base_params->pcie_l1_allowed) {
@@ -2491,23 +2507,23 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
2491 2507
2492 pci_set_master(pdev); 2508 pci_set_master(pdev);
2493 2509
2494 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); 2510 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
2495 if (!err) 2511 if (!ret)
2496 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36)); 2512 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
2497 if (err) { 2513 if (ret) {
2498 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 2514 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2499 if (!err) 2515 if (!ret)
2500 err = pci_set_consistent_dma_mask(pdev, 2516 ret = pci_set_consistent_dma_mask(pdev,
2501 DMA_BIT_MASK(32)); 2517 DMA_BIT_MASK(32));
2502 /* both attempts failed: */ 2518 /* both attempts failed: */
2503 if (err) { 2519 if (ret) {
2504 dev_err(&pdev->dev, "No suitable DMA available\n"); 2520 dev_err(&pdev->dev, "No suitable DMA available\n");
2505 goto out_pci_disable_device; 2521 goto out_pci_disable_device;
2506 } 2522 }
2507 } 2523 }
2508 2524
2509 err = pci_request_regions(pdev, DRV_NAME); 2525 ret = pci_request_regions(pdev, DRV_NAME);
2510 if (err) { 2526 if (ret) {
2511 dev_err(&pdev->dev, "pci_request_regions failed\n"); 2527 dev_err(&pdev->dev, "pci_request_regions failed\n");
2512 goto out_pci_disable_device; 2528 goto out_pci_disable_device;
2513 } 2529 }
@@ -2515,7 +2531,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
2515 trans_pcie->hw_base = pci_ioremap_bar(pdev, 0); 2531 trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
2516 if (!trans_pcie->hw_base) { 2532 if (!trans_pcie->hw_base) {
2517 dev_err(&pdev->dev, "pci_ioremap_bar failed\n"); 2533 dev_err(&pdev->dev, "pci_ioremap_bar failed\n");
2518 err = -ENODEV; 2534 ret = -ENODEV;
2519 goto out_pci_release_regions; 2535 goto out_pci_release_regions;
2520 } 2536 }
2521 2537
@@ -2527,9 +2543,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
2527 trans_pcie->pci_dev = pdev; 2543 trans_pcie->pci_dev = pdev;
2528 iwl_disable_interrupts(trans); 2544 iwl_disable_interrupts(trans);
2529 2545
2530 err = pci_enable_msi(pdev); 2546 ret = pci_enable_msi(pdev);
2531 if (err) { 2547 if (ret) {
2532 dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err); 2548 dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", ret);
2533 /* enable rfkill interrupt: hw bug w/a */ 2549 /* enable rfkill interrupt: hw bug w/a */
2534 pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); 2550 pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
2535 if (pci_cmd & PCI_COMMAND_INTX_DISABLE) { 2551 if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
@@ -2547,11 +2563,16 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
2547 */ 2563 */
2548 if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) { 2564 if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
2549 unsigned long flags; 2565 unsigned long flags;
2550 int ret;
2551 2566
2552 trans->hw_rev = (trans->hw_rev & 0xfff0) | 2567 trans->hw_rev = (trans->hw_rev & 0xfff0) |
2553 (CSR_HW_REV_STEP(trans->hw_rev << 2) << 2); 2568 (CSR_HW_REV_STEP(trans->hw_rev << 2) << 2);
2554 2569
2570 ret = iwl_pcie_prepare_card_hw(trans);
2571 if (ret) {
2572 IWL_WARN(trans, "Exit HW not ready\n");
2573 goto out_pci_disable_msi;
2574 }
2575
2555 /* 2576 /*
2556 * in-order to recognize C step driver should read chip version 2577 * in-order to recognize C step driver should read chip version
2557 * id located at the AUX bus MISC address space. 2578 * id located at the AUX bus MISC address space.
@@ -2591,13 +2612,14 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
2591 /* Initialize the wait queue for commands */ 2612 /* Initialize the wait queue for commands */
2592 init_waitqueue_head(&trans_pcie->wait_command_queue); 2613 init_waitqueue_head(&trans_pcie->wait_command_queue);
2593 2614
2594 if (iwl_pcie_alloc_ict(trans)) 2615 ret = iwl_pcie_alloc_ict(trans);
2616 if (ret)
2595 goto out_pci_disable_msi; 2617 goto out_pci_disable_msi;
2596 2618
2597 err = request_threaded_irq(pdev->irq, iwl_pcie_isr, 2619 ret = request_threaded_irq(pdev->irq, iwl_pcie_isr,
2598 iwl_pcie_irq_handler, 2620 iwl_pcie_irq_handler,
2599 IRQF_SHARED, DRV_NAME, trans); 2621 IRQF_SHARED, DRV_NAME, trans);
2600 if (err) { 2622 if (ret) {
2601 IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq); 2623 IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
2602 goto out_free_ict; 2624 goto out_free_ict;
2603 } 2625 }
@@ -2617,5 +2639,5 @@ out_pci_disable_device:
2617 pci_disable_device(pdev); 2639 pci_disable_device(pdev);
2618out_no_pci: 2640out_no_pci:
2619 iwl_trans_free(trans); 2641 iwl_trans_free(trans);
2620 return ERR_PTR(err); 2642 return ERR_PTR(ret);
2621} 2643}
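
The hunks above fold the old err variable into a single ret used for every failure path, and keep the usual "try a wide DMA mask, fall back to 32-bit" sequence. Below is a minimal sketch of that fallback pattern, reduced to the generic PCI/DMA helpers visible in the hunk; the function name example_set_dma_mask and the omitted transport unwinding are illustrative, not the driver's real code.

	#include <linux/pci.h>
	#include <linux/dma-mapping.h>

	/* Sketch only: 36-bit -> 32-bit DMA mask fallback with one ret. */
	static int example_set_dma_mask(struct pci_dev *pdev)
	{
		int ret;

		/* Prefer the wider 36-bit mask the hardware supports. */
		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
		if (!ret)
			ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
		if (ret) {
			/* Fall back to a plain 32-bit mask. */
			ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
			if (!ret)
				ret = pci_set_consistent_dma_mask(pdev,
								  DMA_BIT_MASK(32));
			if (ret)
				dev_err(&pdev->dev, "No suitable DMA available\n");
		}

		return ret;
	}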
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 2b86c2135de3..607acb53c847 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -1875,8 +1875,19 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
1875 1875
1876 /* start timer if queue currently empty */ 1876 /* start timer if queue currently empty */
1877 if (q->read_ptr == q->write_ptr) { 1877 if (q->read_ptr == q->write_ptr) {
1878 if (txq->wd_timeout) 1878 if (txq->wd_timeout) {
1879 mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); 1879 /*
1880 * If the TXQ is active, then set the timer, if not,
1881 * set the timer in remainder so that the timer will
1882 * be armed with the right value when the station will
1883 * wake up.
1884 */
1885 if (!txq->frozen)
1886 mod_timer(&txq->stuck_timer,
1887 jiffies + txq->wd_timeout);
1888 else
1889 txq->frozen_expiry_remainder = txq->wd_timeout;
1890 }
1880 IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", q->id); 1891 IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", q->id);
1881 iwl_trans_pcie_ref(trans); 1892 iwl_trans_pcie_ref(trans);
1882 } 1893 }
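
The new branch arms the stuck-queue watchdog only while the queue is active; for a frozen queue it stashes the timeout so the timer can be re-armed with the right value once the station wakes. A minimal sketch of that decision follows, with a hypothetical struct and helper name standing in for the real iwl_txq handling.

	#include <linux/types.h>
	#include <linux/timer.h>
	#include <linux/jiffies.h>

	/* Hypothetical reduction of the per-queue state used above. */
	struct example_txq {
		struct timer_list stuck_timer;
		unsigned long wd_timeout;		/* watchdog period, jiffies */
		unsigned long frozen_expiry_remainder;	/* saved while frozen */
		bool frozen;
	};

	static void example_arm_watchdog(struct example_txq *txq)
	{
		if (!txq->wd_timeout)
			return;

		if (!txq->frozen)
			/* Queue is live: (re)arm the watchdog now. */
			mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
		else
			/* Queue is frozen: remember the period to arm on wake-up. */
			txq->frozen_expiry_remainder = txq->wd_timeout;
	}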
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
index b6cc9ff47fc2..1c6788aecc62 100644
--- a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
@@ -172,6 +172,7 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
172 (struct rsi_91x_sdiodev *)adapter->rsi_dev; 172 (struct rsi_91x_sdiodev *)adapter->rsi_dev;
173 u32 len; 173 u32 len;
174 u32 num_blocks; 174 u32 num_blocks;
175 const u8 *fw;
175 const struct firmware *fw_entry = NULL; 176 const struct firmware *fw_entry = NULL;
176 u32 block_size = dev->tx_blk_size; 177 u32 block_size = dev->tx_blk_size;
177 int status = 0; 178 int status = 0;
@@ -200,6 +201,10 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
200 return status; 201 return status;
201 } 202 }
202 203
204 /* Copy firmware into DMA-accessible memory */
205 fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
206 if (!fw)
207 return -ENOMEM;
203 len = fw_entry->size; 208 len = fw_entry->size;
204 209
205 if (len % 4) 210 if (len % 4)
@@ -210,7 +215,8 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
210 rsi_dbg(INIT_ZONE, "%s: Instruction size:%d\n", __func__, len); 215 rsi_dbg(INIT_ZONE, "%s: Instruction size:%d\n", __func__, len);
211 rsi_dbg(INIT_ZONE, "%s: num blocks: %d\n", __func__, num_blocks); 216 rsi_dbg(INIT_ZONE, "%s: num blocks: %d\n", __func__, num_blocks);
212 217
213 status = rsi_copy_to_card(common, fw_entry->data, len, num_blocks); 218 status = rsi_copy_to_card(common, fw, len, num_blocks);
219 kfree(fw);
214 release_firmware(fw_entry); 220 release_firmware(fw_entry);
215 return status; 221 return status;
216} 222}
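
request_firmware() can hand back memory that is not safe to DMA from directly, so the hunk duplicates the image with kmemdup() before pushing it to the card and frees the copy afterwards. A hedged sketch of that bounce-copy pattern, with example_push_to_device() standing in for rsi_copy_to_card():

	#include <linux/firmware.h>
	#include <linux/slab.h>
	#include <linux/device.h>

	/* Sketch: copy a firmware image into kmalloc'd (DMA-friendly) memory
	 * before handing it to the device. */
	static int example_load_firmware(struct device *dev, const char *name,
					 int (*example_push_to_device)(const u8 *buf,
								       size_t len))
	{
		const struct firmware *fw_entry;
		u8 *fw;
		int status;

		status = request_firmware(&fw_entry, name, dev);
		if (status)
			return status;

		/* Bounce the image into DMA-accessible memory. */
		fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
		if (!fw) {
			release_firmware(fw_entry);
			return -ENOMEM;
		}

		status = example_push_to_device(fw, fw_entry->size);

		kfree(fw);
		release_firmware(fw_entry);
		return status;
	}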
diff --git a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c
index 1106ce76707e..30c2cf7fa93b 100644
--- a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c
+++ b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c
@@ -146,7 +146,10 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
146 return status; 146 return status;
147 } 147 }
148 148
149 /* Copy firmware into DMA-accessible memory */
149 fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL); 150 fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
151 if (!fw)
152 return -ENOMEM;
150 len = fw_entry->size; 153 len = fw_entry->size;
151 154
152 if (len % 4) 155 if (len % 4)
@@ -158,6 +161,7 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
158 rsi_dbg(INIT_ZONE, "%s: num blocks: %d\n", __func__, num_blocks); 161 rsi_dbg(INIT_ZONE, "%s: num blocks: %d\n", __func__, num_blocks);
159 162
160 status = rsi_copy_to_card(common, fw, len, num_blocks); 163 status = rsi_copy_to_card(common, fw, len, num_blocks);
164 kfree(fw);
161 release_firmware(fw_entry); 165 release_firmware(fw_entry);
162 return status; 166 return status;
163} 167}
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index 3b3a88b53b11..585d0883c7e5 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -1015,9 +1015,12 @@ static void send_beacon_frame(struct ieee80211_hw *hw,
1015{ 1015{
1016 struct rtl_priv *rtlpriv = rtl_priv(hw); 1016 struct rtl_priv *rtlpriv = rtl_priv(hw);
1017 struct sk_buff *skb = ieee80211_beacon_get(hw, vif); 1017 struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
1018 struct rtl_tcb_desc tcb_desc;
1018 1019
1019 if (skb) 1020 if (skb) {
1020 rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, NULL); 1021 memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
1022 rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, &tcb_desc);
1023 }
1021} 1024}
1022 1025
1023static void rtl_op_bss_info_changed(struct ieee80211_hw *hw, 1026static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
index 1017f02d7bf7..7bf88d9dcdc3 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
@@ -385,6 +385,7 @@ module_param_named(debug, rtl8723be_mod_params.debug, int, 0444);
385module_param_named(ips, rtl8723be_mod_params.inactiveps, bool, 0444); 385module_param_named(ips, rtl8723be_mod_params.inactiveps, bool, 0444);
386module_param_named(swlps, rtl8723be_mod_params.swctrl_lps, bool, 0444); 386module_param_named(swlps, rtl8723be_mod_params.swctrl_lps, bool, 0444);
387module_param_named(fwlps, rtl8723be_mod_params.fwctrl_lps, bool, 0444); 387module_param_named(fwlps, rtl8723be_mod_params.fwctrl_lps, bool, 0444);
388module_param_named(msi, rtl8723be_mod_params.msi_support, bool, 0444);
388module_param_named(disable_watchdog, rtl8723be_mod_params.disable_watchdog, 389module_param_named(disable_watchdog, rtl8723be_mod_params.disable_watchdog,
389 bool, 0444); 390 bool, 0444);
390MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n"); 391MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 1a83e190fc15..28577a31549d 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -61,6 +61,12 @@ void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
61void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue) 61void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
62{ 62{
63 atomic_dec(&queue->inflight_packets); 63 atomic_dec(&queue->inflight_packets);
64
65 /* Wake the dealloc thread _after_ decrementing inflight_packets so
66 * that if kthread_stop() has already been called, the dealloc thread
67 * does not wait forever with nothing to wake it.
68 */
69 wake_up(&queue->dealloc_wq);
64} 70}
65 71
66int xenvif_schedulable(struct xenvif *vif) 72int xenvif_schedulable(struct xenvif *vif)
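
The added wake_up() matters because the dealloc kthread sleeps until there is work or it is asked to stop; if the last inflight packet is released without a wake-up, a concurrent kthread_stop() could leave the thread sleeping with nothing to wake it. A hedged sketch of that pattern follows; the names are illustrative stand-ins, not the driver's real helpers, and the work-todo test is simplified.

	#include <linux/kthread.h>
	#include <linux/wait.h>
	#include <linux/atomic.h>

	struct example_queue {
		wait_queue_head_t dealloc_wq;
		atomic_t inflight_packets;
	};

	static bool example_work_todo(struct example_queue *queue)
	{
		/* Stand-in for the real "anything left to unmap?" test. */
		return atomic_read(&queue->inflight_packets) == 0;
	}

	static int example_dealloc_thread(void *data)
	{
		struct example_queue *queue = data;

		while (!kthread_should_stop()) {
			wait_event_interruptible(queue->dealloc_wq,
						 example_work_todo(queue) ||
						 kthread_should_stop());
			if (kthread_should_stop())
				break;

			/* ... unmap and release completed packets here ... */
		}

		return 0;
	}

	static void example_zerocopy_complete(struct example_queue *queue)
	{
		atomic_dec(&queue->inflight_packets);
		/* Wake after the decrement so the waiter sees the new state. */
		wake_up(&queue->dealloc_wq);
	}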
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 880d0d63e872..3f44b522b831 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -810,23 +810,17 @@ static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
810static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue, 810static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
811 struct sk_buff *skb, 811 struct sk_buff *skb,
812 struct xen_netif_tx_request *txp, 812 struct xen_netif_tx_request *txp,
813 struct gnttab_map_grant_ref *gop) 813 struct gnttab_map_grant_ref *gop,
814 unsigned int frag_overflow,
815 struct sk_buff *nskb)
814{ 816{
815 struct skb_shared_info *shinfo = skb_shinfo(skb); 817 struct skb_shared_info *shinfo = skb_shinfo(skb);
816 skb_frag_t *frags = shinfo->frags; 818 skb_frag_t *frags = shinfo->frags;
817 u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx; 819 u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
818 int start; 820 int start;
819 pending_ring_idx_t index; 821 pending_ring_idx_t index;
820 unsigned int nr_slots, frag_overflow = 0; 822 unsigned int nr_slots;
821 823
822 /* At this point shinfo->nr_frags is in fact the number of
823 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
824 */
825 if (shinfo->nr_frags > MAX_SKB_FRAGS) {
826 frag_overflow = shinfo->nr_frags - MAX_SKB_FRAGS;
827 BUG_ON(frag_overflow > MAX_SKB_FRAGS);
828 shinfo->nr_frags = MAX_SKB_FRAGS;
829 }
830 nr_slots = shinfo->nr_frags; 824 nr_slots = shinfo->nr_frags;
831 825
832 /* Skip first skb fragment if it is on same page as header fragment. */ 826 /* Skip first skb fragment if it is on same page as header fragment. */
@@ -841,13 +835,6 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *que
841 } 835 }
842 836
843 if (frag_overflow) { 837 if (frag_overflow) {
844 struct sk_buff *nskb = xenvif_alloc_skb(0);
845 if (unlikely(nskb == NULL)) {
846 if (net_ratelimit())
847 netdev_err(queue->vif->dev,
848 "Can't allocate the frag_list skb.\n");
849 return NULL;
850 }
851 838
852 shinfo = skb_shinfo(nskb); 839 shinfo = skb_shinfo(nskb);
853 frags = shinfo->frags; 840 frags = shinfo->frags;
@@ -1175,9 +1162,10 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
1175 unsigned *copy_ops, 1162 unsigned *copy_ops,
1176 unsigned *map_ops) 1163 unsigned *map_ops)
1177{ 1164{
1178 struct gnttab_map_grant_ref *gop = queue->tx_map_ops, *request_gop; 1165 struct gnttab_map_grant_ref *gop = queue->tx_map_ops;
1179 struct sk_buff *skb; 1166 struct sk_buff *skb, *nskb;
1180 int ret; 1167 int ret;
1168 unsigned int frag_overflow;
1181 1169
1182 while (skb_queue_len(&queue->tx_queue) < budget) { 1170 while (skb_queue_len(&queue->tx_queue) < budget) {
1183 struct xen_netif_tx_request txreq; 1171 struct xen_netif_tx_request txreq;
@@ -1265,6 +1253,29 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
1265 break; 1253 break;
1266 } 1254 }
1267 1255
1256 skb_shinfo(skb)->nr_frags = ret;
1257 if (data_len < txreq.size)
1258 skb_shinfo(skb)->nr_frags++;
1259 /* At this point shinfo->nr_frags is in fact the number of
1260 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
1261 */
1262 frag_overflow = 0;
1263 nskb = NULL;
1264 if (skb_shinfo(skb)->nr_frags > MAX_SKB_FRAGS) {
1265 frag_overflow = skb_shinfo(skb)->nr_frags - MAX_SKB_FRAGS;
1266 BUG_ON(frag_overflow > MAX_SKB_FRAGS);
1267 skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
1268 nskb = xenvif_alloc_skb(0);
1269 if (unlikely(nskb == NULL)) {
1270 kfree_skb(skb);
1271 xenvif_tx_err(queue, &txreq, idx);
1272 if (net_ratelimit())
1273 netdev_err(queue->vif->dev,
1274 "Can't allocate the frag_list skb.\n");
1275 break;
1276 }
1277 }
1278
1268 if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) { 1279 if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
1269 struct xen_netif_extra_info *gso; 1280 struct xen_netif_extra_info *gso;
1270 gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; 1281 gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
@@ -1272,6 +1283,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
1272 if (xenvif_set_skb_gso(queue->vif, skb, gso)) { 1283 if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
1273 /* Failure in xenvif_set_skb_gso is fatal. */ 1284 /* Failure in xenvif_set_skb_gso is fatal. */
1274 kfree_skb(skb); 1285 kfree_skb(skb);
1286 kfree_skb(nskb);
1275 break; 1287 break;
1276 } 1288 }
1277 } 1289 }
@@ -1294,9 +1306,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
1294 1306
1295 (*copy_ops)++; 1307 (*copy_ops)++;
1296 1308
1297 skb_shinfo(skb)->nr_frags = ret;
1298 if (data_len < txreq.size) { 1309 if (data_len < txreq.size) {
1299 skb_shinfo(skb)->nr_frags++;
1300 frag_set_pending_idx(&skb_shinfo(skb)->frags[0], 1310 frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
1301 pending_idx); 1311 pending_idx);
1302 xenvif_tx_create_map_op(queue, pending_idx, &txreq, gop); 1312 xenvif_tx_create_map_op(queue, pending_idx, &txreq, gop);
@@ -1310,13 +1320,8 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
1310 1320
1311 queue->pending_cons++; 1321 queue->pending_cons++;
1312 1322
1313 request_gop = xenvif_get_requests(queue, skb, txfrags, gop); 1323 gop = xenvif_get_requests(queue, skb, txfrags, gop,
1314 if (request_gop == NULL) { 1324 frag_overflow, nskb);
1315 kfree_skb(skb);
1316 xenvif_tx_err(queue, &txreq, idx);
1317 break;
1318 }
1319 gop = request_gop;
1320 1325
1321 __skb_queue_tail(&queue->tx_queue, skb); 1326 __skb_queue_tail(&queue->tx_queue, skb);
1322 1327
@@ -1536,7 +1541,6 @@ void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
1536 smp_wmb(); 1541 smp_wmb();
1537 queue->dealloc_prod++; 1542 queue->dealloc_prod++;
1538 } while (ubuf); 1543 } while (ubuf);
1539 wake_up(&queue->dealloc_wq);
1540 spin_unlock_irqrestore(&queue->callback_lock, flags); 1544 spin_unlock_irqrestore(&queue->callback_lock, flags);
1541 1545
1542 if (likely(zerocopy_success)) 1546 if (likely(zerocopy_success))
@@ -1566,13 +1570,13 @@ static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
1566 smp_rmb(); 1570 smp_rmb();
1567 1571
1568 while (dc != dp) { 1572 while (dc != dp) {
1569 BUG_ON(gop - queue->tx_unmap_ops > MAX_PENDING_REQS); 1573 BUG_ON(gop - queue->tx_unmap_ops >= MAX_PENDING_REQS);
1570 pending_idx = 1574 pending_idx =
1571 queue->dealloc_ring[pending_index(dc++)]; 1575 queue->dealloc_ring[pending_index(dc++)];
1572 1576
1573 pending_idx_release[gop-queue->tx_unmap_ops] = 1577 pending_idx_release[gop - queue->tx_unmap_ops] =
1574 pending_idx; 1578 pending_idx;
1575 queue->pages_to_unmap[gop-queue->tx_unmap_ops] = 1579 queue->pages_to_unmap[gop - queue->tx_unmap_ops] =
1576 queue->mmap_pages[pending_idx]; 1580 queue->mmap_pages[pending_idx];
1577 gnttab_set_unmap_op(gop, 1581 gnttab_set_unmap_op(gop,
1578 idx_to_kaddr(queue, pending_idx), 1582 idx_to_kaddr(queue, pending_idx),
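
The netback rework moves frag-overflow detection and the frag_list skb allocation out of xenvif_get_requests() and into the caller, so an allocation failure can be reported before any grant-mapping state is built. A reduced, hypothetical sketch of the caller-side sequence (example_alloc_skb() stands in for xenvif_alloc_skb(); error reporting is left to the caller):

	#include <linux/skbuff.h>

	/* Returns the number of overflowing slots, or -ENOMEM if the companion
	 * skb for the overflow could not be allocated. */
	static int example_prepare_frags(struct sk_buff *skb, unsigned int slots,
					 struct sk_buff **out_nskb,
					 struct sk_buff *(*example_alloc_skb)(unsigned int size))
	{
		unsigned int frag_overflow = 0;
		struct sk_buff *nskb = NULL;

		/* At this point nr_frags really counts ring slots, which may
		 * exceed what a single skb can hold. */
		skb_shinfo(skb)->nr_frags = slots;
		if (skb_shinfo(skb)->nr_frags > MAX_SKB_FRAGS) {
			frag_overflow = skb_shinfo(skb)->nr_frags - MAX_SKB_FRAGS;
			skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;

			/* Overflowing slots go into a second skb chained later. */
			nskb = example_alloc_skb(0);
			if (!nskb)
				return -ENOMEM;
		}

		*out_nskb = nskb;
		return frag_overflow;
	}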
diff --git a/drivers/ntb/ntb.c b/drivers/ntb/ntb.c
index 23435f2a5486..2e2530743831 100644
--- a/drivers/ntb/ntb.c
+++ b/drivers/ntb/ntb.c
@@ -114,7 +114,7 @@ int ntb_register_device(struct ntb_dev *ntb)
114 ntb->dev.bus = &ntb_bus; 114 ntb->dev.bus = &ntb_bus;
115 ntb->dev.parent = &ntb->pdev->dev; 115 ntb->dev.parent = &ntb->pdev->dev;
116 ntb->dev.release = ntb_dev_release; 116 ntb->dev.release = ntb_dev_release;
117 dev_set_name(&ntb->dev, pci_name(ntb->pdev)); 117 dev_set_name(&ntb->dev, "%s", pci_name(ntb->pdev));
118 118
119 ntb->ctx = NULL; 119 ntb->ctx = NULL;
120 ntb->ctx_ops = NULL; 120 ntb->ctx_ops = NULL;
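
The one-line ntb.c change applies the usual printf-format rule: a string derived from elsewhere (here the PCI device name) must never be used as the format itself. A tiny sketch of the rule:

	#include <linux/device.h>

	static int example_name_device(struct device *dev, const char *raw_name)
	{
		/* Wrong: raw_name would be parsed for '%' conversions. */
		/* return dev_set_name(dev, raw_name); */

		/* Right: raw_name is only ever substituted as data. */
		return dev_set_name(dev, "%s", raw_name);
	}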
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index efe3ad4122f2..1c6386d5f79c 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -142,10 +142,11 @@ struct ntb_transport_qp {
142 142
143 void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data, 143 void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
144 void *data, int len); 144 void *data, int len);
145 struct list_head rx_post_q;
145 struct list_head rx_pend_q; 146 struct list_head rx_pend_q;
146 struct list_head rx_free_q; 147 struct list_head rx_free_q;
147 spinlock_t ntb_rx_pend_q_lock; 148 /* ntb_rx_q_lock: synchronize access to rx_XXXX_q */
148 spinlock_t ntb_rx_free_q_lock; 149 spinlock_t ntb_rx_q_lock;
149 void *rx_buff; 150 void *rx_buff;
150 unsigned int rx_index; 151 unsigned int rx_index;
151 unsigned int rx_max_entry; 152 unsigned int rx_max_entry;
@@ -211,6 +212,8 @@ struct ntb_transport_ctx {
211 bool link_is_up; 212 bool link_is_up;
212 struct delayed_work link_work; 213 struct delayed_work link_work;
213 struct work_struct link_cleanup; 214 struct work_struct link_cleanup;
215
216 struct dentry *debugfs_node_dir;
214}; 217};
215 218
216enum { 219enum {
@@ -436,13 +439,17 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
436 char *buf; 439 char *buf;
437 ssize_t ret, out_offset, out_count; 440 ssize_t ret, out_offset, out_count;
438 441
442 qp = filp->private_data;
443
444 if (!qp || !qp->link_is_up)
445 return 0;
446
439 out_count = 1000; 447 out_count = 1000;
440 448
441 buf = kmalloc(out_count, GFP_KERNEL); 449 buf = kmalloc(out_count, GFP_KERNEL);
442 if (!buf) 450 if (!buf)
443 return -ENOMEM; 451 return -ENOMEM;
444 452
445 qp = filp->private_data;
446 out_offset = 0; 453 out_offset = 0;
447 out_offset += snprintf(buf + out_offset, out_count - out_offset, 454 out_offset += snprintf(buf + out_offset, out_count - out_offset,
448 "NTB QP stats\n"); 455 "NTB QP stats\n");
@@ -534,6 +541,27 @@ out:
534 return entry; 541 return entry;
535} 542}
536 543
544static struct ntb_queue_entry *ntb_list_mv(spinlock_t *lock,
545 struct list_head *list,
546 struct list_head *to_list)
547{
548 struct ntb_queue_entry *entry;
549 unsigned long flags;
550
551 spin_lock_irqsave(lock, flags);
552
553 if (list_empty(list)) {
554 entry = NULL;
555 } else {
556 entry = list_first_entry(list, struct ntb_queue_entry, entry);
557 list_move_tail(&entry->entry, to_list);
558 }
559
560 spin_unlock_irqrestore(lock, flags);
561
562 return entry;
563}
564
537static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt, 565static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
538 unsigned int qp_num) 566 unsigned int qp_num)
539{ 567{
@@ -601,13 +629,16 @@ static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw)
601} 629}
602 630
603static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw, 631static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
604 unsigned int size) 632 resource_size_t size)
605{ 633{
606 struct ntb_transport_mw *mw = &nt->mw_vec[num_mw]; 634 struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
607 struct pci_dev *pdev = nt->ndev->pdev; 635 struct pci_dev *pdev = nt->ndev->pdev;
608 unsigned int xlat_size, buff_size; 636 size_t xlat_size, buff_size;
609 int rc; 637 int rc;
610 638
639 if (!size)
640 return -EINVAL;
641
611 xlat_size = round_up(size, mw->xlat_align_size); 642 xlat_size = round_up(size, mw->xlat_align_size);
612 buff_size = round_up(size, mw->xlat_align); 643 buff_size = round_up(size, mw->xlat_align);
613 644
@@ -627,7 +658,7 @@ static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
627 if (!mw->virt_addr) { 658 if (!mw->virt_addr) {
628 mw->xlat_size = 0; 659 mw->xlat_size = 0;
629 mw->buff_size = 0; 660 mw->buff_size = 0;
630 dev_err(&pdev->dev, "Unable to alloc MW buff of size %d\n", 661 dev_err(&pdev->dev, "Unable to alloc MW buff of size %zu\n",
631 buff_size); 662 buff_size);
632 return -ENOMEM; 663 return -ENOMEM;
633 } 664 }
@@ -867,6 +898,8 @@ static void ntb_qp_link_work(struct work_struct *work)
867 898
868 if (qp->event_handler) 899 if (qp->event_handler)
869 qp->event_handler(qp->cb_data, qp->link_is_up); 900 qp->event_handler(qp->cb_data, qp->link_is_up);
901
902 tasklet_schedule(&qp->rxc_db_work);
870 } else if (nt->link_is_up) 903 } else if (nt->link_is_up)
871 schedule_delayed_work(&qp->link_work, 904 schedule_delayed_work(&qp->link_work,
872 msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT)); 905 msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
@@ -923,12 +956,12 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
923 qp->tx_max_frame = min(transport_mtu, tx_size / 2); 956 qp->tx_max_frame = min(transport_mtu, tx_size / 2);
924 qp->tx_max_entry = tx_size / qp->tx_max_frame; 957 qp->tx_max_entry = tx_size / qp->tx_max_frame;
925 958
926 if (nt_debugfs_dir) { 959 if (nt->debugfs_node_dir) {
927 char debugfs_name[4]; 960 char debugfs_name[4];
928 961
929 snprintf(debugfs_name, 4, "qp%d", qp_num); 962 snprintf(debugfs_name, 4, "qp%d", qp_num);
930 qp->debugfs_dir = debugfs_create_dir(debugfs_name, 963 qp->debugfs_dir = debugfs_create_dir(debugfs_name,
931 nt_debugfs_dir); 964 nt->debugfs_node_dir);
932 965
933 qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR, 966 qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
934 qp->debugfs_dir, qp, 967 qp->debugfs_dir, qp,
@@ -941,10 +974,10 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
941 INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work); 974 INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
942 INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup_work); 975 INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup_work);
943 976
944 spin_lock_init(&qp->ntb_rx_pend_q_lock); 977 spin_lock_init(&qp->ntb_rx_q_lock);
945 spin_lock_init(&qp->ntb_rx_free_q_lock);
946 spin_lock_init(&qp->ntb_tx_free_q_lock); 978 spin_lock_init(&qp->ntb_tx_free_q_lock);
947 979
980 INIT_LIST_HEAD(&qp->rx_post_q);
948 INIT_LIST_HEAD(&qp->rx_pend_q); 981 INIT_LIST_HEAD(&qp->rx_pend_q);
949 INIT_LIST_HEAD(&qp->rx_free_q); 982 INIT_LIST_HEAD(&qp->rx_free_q);
950 INIT_LIST_HEAD(&qp->tx_free_q); 983 INIT_LIST_HEAD(&qp->tx_free_q);
@@ -1031,6 +1064,12 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
1031 goto err2; 1064 goto err2;
1032 } 1065 }
1033 1066
1067 if (nt_debugfs_dir) {
1068 nt->debugfs_node_dir =
1069 debugfs_create_dir(pci_name(ndev->pdev),
1070 nt_debugfs_dir);
1071 }
1072
1034 for (i = 0; i < qp_count; i++) { 1073 for (i = 0; i < qp_count; i++) {
1035 rc = ntb_transport_init_queue(nt, i); 1074 rc = ntb_transport_init_queue(nt, i);
1036 if (rc) 1075 if (rc)
@@ -1107,22 +1146,47 @@ static void ntb_transport_free(struct ntb_client *self, struct ntb_dev *ndev)
1107 kfree(nt); 1146 kfree(nt);
1108} 1147}
1109 1148
1110static void ntb_rx_copy_callback(void *data) 1149static void ntb_complete_rxc(struct ntb_transport_qp *qp)
1111{ 1150{
1112 struct ntb_queue_entry *entry = data; 1151 struct ntb_queue_entry *entry;
1113 struct ntb_transport_qp *qp = entry->qp; 1152 void *cb_data;
1114 void *cb_data = entry->cb_data; 1153 unsigned int len;
1115 unsigned int len = entry->len; 1154 unsigned long irqflags;
1116 struct ntb_payload_header *hdr = entry->rx_hdr; 1155
1156 spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);
1157
1158 while (!list_empty(&qp->rx_post_q)) {
1159 entry = list_first_entry(&qp->rx_post_q,
1160 struct ntb_queue_entry, entry);
1161 if (!(entry->flags & DESC_DONE_FLAG))
1162 break;
1163
1164 entry->rx_hdr->flags = 0;
1165 iowrite32(entry->index, &qp->rx_info->entry);
1117 1166
1118 hdr->flags = 0; 1167 cb_data = entry->cb_data;
1168 len = entry->len;
1119 1169
1120 iowrite32(entry->index, &qp->rx_info->entry); 1170 list_move_tail(&entry->entry, &qp->rx_free_q);
1121 1171
1122 ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q); 1172 spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);
1123 1173
1124 if (qp->rx_handler && qp->client_ready) 1174 if (qp->rx_handler && qp->client_ready)
1125 qp->rx_handler(qp, qp->cb_data, cb_data, len); 1175 qp->rx_handler(qp, qp->cb_data, cb_data, len);
1176
1177 spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);
1178 }
1179
1180 spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);
1181}
1182
1183static void ntb_rx_copy_callback(void *data)
1184{
1185 struct ntb_queue_entry *entry = data;
1186
1187 entry->flags |= DESC_DONE_FLAG;
1188
1189 ntb_complete_rxc(entry->qp);
1126} 1190}
1127 1191
1128static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset) 1192static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
@@ -1138,19 +1202,18 @@ static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
1138 ntb_rx_copy_callback(entry); 1202 ntb_rx_copy_callback(entry);
1139} 1203}
1140 1204
1141static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset, 1205static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
1142 size_t len)
1143{ 1206{
1144 struct dma_async_tx_descriptor *txd; 1207 struct dma_async_tx_descriptor *txd;
1145 struct ntb_transport_qp *qp = entry->qp; 1208 struct ntb_transport_qp *qp = entry->qp;
1146 struct dma_chan *chan = qp->dma_chan; 1209 struct dma_chan *chan = qp->dma_chan;
1147 struct dma_device *device; 1210 struct dma_device *device;
1148 size_t pay_off, buff_off; 1211 size_t pay_off, buff_off, len;
1149 struct dmaengine_unmap_data *unmap; 1212 struct dmaengine_unmap_data *unmap;
1150 dma_cookie_t cookie; 1213 dma_cookie_t cookie;
1151 void *buf = entry->buf; 1214 void *buf = entry->buf;
1152 1215
1153 entry->len = len; 1216 len = entry->len;
1154 1217
1155 if (!chan) 1218 if (!chan)
1156 goto err; 1219 goto err;
@@ -1226,7 +1289,6 @@ static int ntb_process_rxc(struct ntb_transport_qp *qp)
1226 struct ntb_payload_header *hdr; 1289 struct ntb_payload_header *hdr;
1227 struct ntb_queue_entry *entry; 1290 struct ntb_queue_entry *entry;
1228 void *offset; 1291 void *offset;
1229 int rc;
1230 1292
1231 offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index; 1293 offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index;
1232 hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header); 1294 hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);
@@ -1255,65 +1317,43 @@ static int ntb_process_rxc(struct ntb_transport_qp *qp)
1255 return -EIO; 1317 return -EIO;
1256 } 1318 }
1257 1319
1258 entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q); 1320 entry = ntb_list_mv(&qp->ntb_rx_q_lock, &qp->rx_pend_q, &qp->rx_post_q);
1259 if (!entry) { 1321 if (!entry) {
1260 dev_dbg(&qp->ndev->pdev->dev, "no receive buffer\n"); 1322 dev_dbg(&qp->ndev->pdev->dev, "no receive buffer\n");
1261 qp->rx_err_no_buf++; 1323 qp->rx_err_no_buf++;
1262 1324 return -EAGAIN;
1263 rc = -ENOMEM;
1264 goto err;
1265 } 1325 }
1266 1326
1327 entry->rx_hdr = hdr;
1328 entry->index = qp->rx_index;
1329
1267 if (hdr->len > entry->len) { 1330 if (hdr->len > entry->len) {
1268 dev_dbg(&qp->ndev->pdev->dev, 1331 dev_dbg(&qp->ndev->pdev->dev,
1269 "receive buffer overflow! Wanted %d got %d\n", 1332 "receive buffer overflow! Wanted %d got %d\n",
1270 hdr->len, entry->len); 1333 hdr->len, entry->len);
1271 qp->rx_err_oflow++; 1334 qp->rx_err_oflow++;
1272 1335
1273 rc = -EIO; 1336 entry->len = -EIO;
1274 goto err; 1337 entry->flags |= DESC_DONE_FLAG;
1275 }
1276 1338
1277 dev_dbg(&qp->ndev->pdev->dev, 1339 ntb_complete_rxc(qp);
1278 "RX OK index %u ver %u size %d into buf size %d\n", 1340 } else {
1279 qp->rx_index, hdr->ver, hdr->len, entry->len); 1341 dev_dbg(&qp->ndev->pdev->dev,
1342 "RX OK index %u ver %u size %d into buf size %d\n",
1343 qp->rx_index, hdr->ver, hdr->len, entry->len);
1280 1344
1281 qp->rx_bytes += hdr->len; 1345 qp->rx_bytes += hdr->len;
1282 qp->rx_pkts++; 1346 qp->rx_pkts++;
1283 1347
1284 entry->index = qp->rx_index; 1348 entry->len = hdr->len;
1285 entry->rx_hdr = hdr;
1286 1349
1287 ntb_async_rx(entry, offset, hdr->len); 1350 ntb_async_rx(entry, offset);
1351 }
1288 1352
1289 qp->rx_index++; 1353 qp->rx_index++;
1290 qp->rx_index %= qp->rx_max_entry; 1354 qp->rx_index %= qp->rx_max_entry;
1291 1355
1292 return 0; 1356 return 0;
1293
1294err:
1295 /* FIXME: if this syncrhonous update of the rx_index gets ahead of
1296 * asyncrhonous ntb_rx_copy_callback of previous entry, there are three
1297 * scenarios:
1298 *
1299 * 1) The peer might miss this update, but observe the update
1300 * from the memcpy completion callback. In this case, the buffer will
1301 * not be freed on the peer to be reused for a different packet. The
1302 * successful rx of a later packet would clear the condition, but the
1303 * condition could persist if several rx fail in a row.
1304 *
1305 * 2) The peer may observe this update before the asyncrhonous copy of
1306 * prior packets is completed. The peer may overwrite the buffers of
1307 * the prior packets before they are copied.
1308 *
1309 * 3) Both: the peer may observe the update, and then observe the index
1310 * decrement by the asynchronous completion callback. Who knows what
1311 * badness that will cause.
1312 */
1313 hdr->flags = 0;
1314 iowrite32(qp->rx_index, &qp->rx_info->entry);
1315
1316 return rc;
1317} 1357}
1318 1358
1319static void ntb_transport_rxc_db(unsigned long data) 1359static void ntb_transport_rxc_db(unsigned long data)
@@ -1333,7 +1373,7 @@ static void ntb_transport_rxc_db(unsigned long data)
1333 break; 1373 break;
1334 } 1374 }
1335 1375
1336 if (qp->dma_chan) 1376 if (i && qp->dma_chan)
1337 dma_async_issue_pending(qp->dma_chan); 1377 dma_async_issue_pending(qp->dma_chan);
1338 1378
1339 if (i == qp->rx_max_entry) { 1379 if (i == qp->rx_max_entry) {
@@ -1609,7 +1649,7 @@ ntb_transport_create_queue(void *data, struct device *client_dev,
1609 goto err1; 1649 goto err1;
1610 1650
1611 entry->qp = qp; 1651 entry->qp = qp;
1612 ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, 1652 ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry,
1613 &qp->rx_free_q); 1653 &qp->rx_free_q);
1614 } 1654 }
1615 1655
@@ -1634,7 +1674,7 @@ err2:
1634 while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q))) 1674 while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
1635 kfree(entry); 1675 kfree(entry);
1636err1: 1676err1:
1637 while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q))) 1677 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
1638 kfree(entry); 1678 kfree(entry);
1639 if (qp->dma_chan) 1679 if (qp->dma_chan)
1640 dma_release_channel(qp->dma_chan); 1680 dma_release_channel(qp->dma_chan);
@@ -1652,7 +1692,6 @@ EXPORT_SYMBOL_GPL(ntb_transport_create_queue);
1652 */ 1692 */
1653void ntb_transport_free_queue(struct ntb_transport_qp *qp) 1693void ntb_transport_free_queue(struct ntb_transport_qp *qp)
1654{ 1694{
1655 struct ntb_transport_ctx *nt = qp->transport;
1656 struct pci_dev *pdev; 1695 struct pci_dev *pdev;
1657 struct ntb_queue_entry *entry; 1696 struct ntb_queue_entry *entry;
1658 u64 qp_bit; 1697 u64 qp_bit;
@@ -1689,18 +1728,23 @@ void ntb_transport_free_queue(struct ntb_transport_qp *qp)
1689 qp->tx_handler = NULL; 1728 qp->tx_handler = NULL;
1690 qp->event_handler = NULL; 1729 qp->event_handler = NULL;
1691 1730
1692 while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q))) 1731 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
1693 kfree(entry); 1732 kfree(entry);
1694 1733
1695 while ((entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q))) { 1734 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q))) {
1696 dev_warn(&pdev->dev, "Freeing item from a non-empty queue\n"); 1735 dev_warn(&pdev->dev, "Freeing item from non-empty rx_pend_q\n");
1736 kfree(entry);
1737 }
1738
1739 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_post_q))) {
1740 dev_warn(&pdev->dev, "Freeing item from non-empty rx_post_q\n");
1697 kfree(entry); 1741 kfree(entry);
1698 } 1742 }
1699 1743
1700 while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q))) 1744 while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
1701 kfree(entry); 1745 kfree(entry);
1702 1746
1703 nt->qp_bitmap_free |= qp_bit; 1747 qp->transport->qp_bitmap_free |= qp_bit;
1704 1748
1705 dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num); 1749 dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num);
1706} 1750}
@@ -1724,14 +1768,14 @@ void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len)
1724 if (!qp || qp->client_ready) 1768 if (!qp || qp->client_ready)
1725 return NULL; 1769 return NULL;
1726 1770
1727 entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q); 1771 entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q);
1728 if (!entry) 1772 if (!entry)
1729 return NULL; 1773 return NULL;
1730 1774
1731 buf = entry->cb_data; 1775 buf = entry->cb_data;
1732 *len = entry->len; 1776 *len = entry->len;
1733 1777
1734 ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q); 1778 ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_free_q);
1735 1779
1736 return buf; 1780 return buf;
1737} 1781}
@@ -1757,15 +1801,18 @@ int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
1757 if (!qp) 1801 if (!qp)
1758 return -EINVAL; 1802 return -EINVAL;
1759 1803
1760 entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q); 1804 entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q);
1761 if (!entry) 1805 if (!entry)
1762 return -ENOMEM; 1806 return -ENOMEM;
1763 1807
1764 entry->cb_data = cb; 1808 entry->cb_data = cb;
1765 entry->buf = data; 1809 entry->buf = data;
1766 entry->len = len; 1810 entry->len = len;
1811 entry->flags = 0;
1812
1813 ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_pend_q);
1767 1814
1768 ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, &qp->rx_pend_q); 1815 tasklet_schedule(&qp->rxc_db_work);
1769 1816
1770 return 0; 1817 return 0;
1771} 1818}
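
The ntb_transport changes replace the old synchronous rx_index error path with a third list, rx_post_q: entries move from rx_pend_q to rx_post_q when a descriptor is consumed, and completions are only delivered from the head of rx_post_q once the head is marked done, which keeps out-of-order DMA completions from being reported early. A generic, hypothetical sketch of that lifecycle (the real driver also writes the rx_info entry and calls the client handler):

	#include <linux/types.h>
	#include <linux/list.h>
	#include <linux/spinlock.h>

	struct example_entry {
		struct list_head entry;
		bool done;
	};

	struct example_queue {
		spinlock_t lock;
		struct list_head pend_q;	/* posted, not yet consumed */
		struct list_head post_q;	/* consumed, awaiting completion */
		struct list_head free_q;	/* completed, ready for reuse */
	};

	/* Consume the oldest pending entry; it stays queued on post_q so that
	 * completions can be delivered strictly in order. */
	static struct example_entry *example_consume(struct example_queue *q)
	{
		struct example_entry *e = NULL;
		unsigned long flags;

		spin_lock_irqsave(&q->lock, flags);
		if (!list_empty(&q->pend_q)) {
			e = list_first_entry(&q->pend_q, struct example_entry, entry);
			list_move_tail(&e->entry, &q->post_q);
		}
		spin_unlock_irqrestore(&q->lock, flags);
		return e;
	}

	/* Retire entries from the head of post_q, but only while the head is
	 * actually done - a later DMA finishing first must wait its turn. */
	static void example_complete_in_order(struct example_queue *q)
	{
		struct example_entry *e;
		unsigned long flags;

		spin_lock_irqsave(&q->lock, flags);
		while (!list_empty(&q->post_q)) {
			e = list_first_entry(&q->post_q, struct example_entry, entry);
			if (!e->done)
				break;
			list_move_tail(&e->entry, &q->free_q);
		}
		spin_unlock_irqrestore(&q->lock, flags);
	}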
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index a5233422f9dc..7384455792bf 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -458,10 +458,15 @@ static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
458 nvdimm_bus_unlock(dev); 458 nvdimm_bus_unlock(dev);
459 } 459 }
460 if (is_nd_btt(dev) && probe) { 460 if (is_nd_btt(dev) && probe) {
461 struct nd_btt *nd_btt = to_nd_btt(dev);
462
461 nd_region = to_nd_region(dev->parent); 463 nd_region = to_nd_region(dev->parent);
462 nvdimm_bus_lock(dev); 464 nvdimm_bus_lock(dev);
463 if (nd_region->btt_seed == dev) 465 if (nd_region->btt_seed == dev)
464 nd_region_create_btt_seed(nd_region); 466 nd_region_create_btt_seed(nd_region);
467 if (nd_region->ns_seed == &nd_btt->ndns->dev &&
468 is_nd_blk(dev->parent))
469 nd_region_create_blk_seed(nd_region);
465 nvdimm_bus_unlock(dev); 470 nvdimm_bus_unlock(dev);
466 } 471 }
467} 472}
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index 8df1b1777745..59bb8556e43a 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -47,7 +47,7 @@ config OF_DYNAMIC
47 47
48config OF_ADDRESS 48config OF_ADDRESS
49 def_bool y 49 def_bool y
50 depends on !SPARC 50 depends on !SPARC && HAS_IOMEM
51 select OF_ADDRESS_PCI if PCI 51 select OF_ADDRESS_PCI if PCI
52 52
53config OF_ADDRESS_PCI 53config OF_ADDRESS_PCI
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 18016341d5a9..9f71770b6226 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -979,7 +979,6 @@ static struct platform_driver unittest_driver = {
979 .remove = unittest_remove, 979 .remove = unittest_remove,
980 .driver = { 980 .driver = {
981 .name = "unittest", 981 .name = "unittest",
982 .owner = THIS_MODULE,
983 .of_match_table = of_match_ptr(unittest_match), 982 .of_match_table = of_match_ptr(unittest_match),
984 }, 983 },
985}; 984};
@@ -1666,7 +1665,6 @@ static const struct i2c_device_id unittest_i2c_dev_id[] = {
1666static struct i2c_driver unittest_i2c_dev_driver = { 1665static struct i2c_driver unittest_i2c_dev_driver = {
1667 .driver = { 1666 .driver = {
1668 .name = "unittest-i2c-dev", 1667 .name = "unittest-i2c-dev",
1669 .owner = THIS_MODULE,
1670 }, 1668 },
1671 .probe = unittest_i2c_dev_probe, 1669 .probe = unittest_i2c_dev_probe,
1672 .remove = unittest_i2c_dev_remove, 1670 .remove = unittest_i2c_dev_remove,
@@ -1761,7 +1759,6 @@ static const struct i2c_device_id unittest_i2c_mux_id[] = {
1761static struct i2c_driver unittest_i2c_mux_driver = { 1759static struct i2c_driver unittest_i2c_mux_driver = {
1762 .driver = { 1760 .driver = {
1763 .name = "unittest-i2c-mux", 1761 .name = "unittest-i2c-mux",
1764 .owner = THIS_MODULE,
1765 }, 1762 },
1766 .probe = unittest_i2c_mux_probe, 1763 .probe = unittest_i2c_mux_probe,
1767 .remove = unittest_i2c_mux_remove, 1764 .remove = unittest_i2c_mux_remove,
diff --git a/drivers/parport/share.c b/drivers/parport/share.c
index 8067f54ce050..5ce5ef211bdb 100644
--- a/drivers/parport/share.c
+++ b/drivers/parport/share.c
@@ -891,8 +891,10 @@ parport_register_dev_model(struct parport *port, const char *name,
891 par_dev->dev.release = free_pardevice; 891 par_dev->dev.release = free_pardevice;
892 par_dev->devmodel = true; 892 par_dev->devmodel = true;
893 ret = device_register(&par_dev->dev); 893 ret = device_register(&par_dev->dev);
894 if (ret) 894 if (ret) {
895 goto err_put_dev; 895 put_device(&par_dev->dev);
896 goto err_put_port;
897 }
896 898
897 /* Chain this onto the list */ 899 /* Chain this onto the list */
898 par_dev->prev = NULL; 900 par_dev->prev = NULL;
@@ -907,7 +909,8 @@ parport_register_dev_model(struct parport *port, const char *name,
907 spin_unlock(&port->physport->pardevice_lock); 909 spin_unlock(&port->physport->pardevice_lock);
908 pr_debug("%s: cannot grant exclusive access for device %s\n", 910 pr_debug("%s: cannot grant exclusive access for device %s\n",
909 port->name, name); 911 port->name, name);
910 goto err_put_dev; 912 device_unregister(&par_dev->dev);
913 goto err_put_port;
911 } 914 }
912 port->flags |= PARPORT_FLAG_EXCL; 915 port->flags |= PARPORT_FLAG_EXCL;
913 } 916 }
@@ -938,8 +941,6 @@ parport_register_dev_model(struct parport *port, const char *name,
938 941
939 return par_dev; 942 return par_dev;
940 943
941err_put_dev:
942 put_device(&par_dev->dev);
943err_free_devname: 944err_free_devname:
944 kfree(devname); 945 kfree(devname);
945err_free_par_dev: 946err_free_par_dev:
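
The parport fix follows the standard struct device ownership rule: once device_register() has been called, the memory belongs to the kobject, so a registration failure is unwound with put_device() (which invokes ->release), never kfree(); after a successful registration, teardown goes through device_unregister(). A hedged sketch with a hypothetical wrapper type:

	#include <linux/device.h>
	#include <linux/slab.h>

	struct example_child {
		struct device dev;
	};

	static void example_child_release(struct device *dev)
	{
		kfree(container_of(dev, struct example_child, dev));
	}

	static struct example_child *example_add_child(struct device *parent)
	{
		struct example_child *child;
		int ret;

		child = kzalloc(sizeof(*child), GFP_KERNEL);
		if (!child)
			return NULL;

		child->dev.parent = parent;
		child->dev.release = example_child_release;
		dev_set_name(&child->dev, "example-child");

		ret = device_register(&child->dev);
		if (ret) {
			/* The kobject already owns the memory: drop the
			 * reference, never kfree() here. */
			put_device(&child->dev);
			return NULL;
		}

		return child;
	}

	/* After a successful registration, unwind with device_unregister(),
	 * which unhooks the device and drops the final reference. */
	static void example_remove_child(struct example_child *child)
	{
		device_unregister(&child->dev);
	}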
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 73de4efcbe6e..944f50015ed0 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -2,7 +2,7 @@
2# PCI configuration 2# PCI configuration
3# 3#
4config PCI_BUS_ADDR_T_64BIT 4config PCI_BUS_ADDR_T_64BIT
5 def_bool y if (ARCH_DMA_ADDR_T_64BIT || 64BIT) 5 def_bool y if (ARCH_DMA_ADDR_T_64BIT || (64BIT && !PARISC))
6 depends on PCI 6 depends on PCI
7 7
8config PCI_MSI 8config PCI_MSI
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index cefd636681b6..b978bbfe044c 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -997,7 +997,12 @@ void set_pcie_port_type(struct pci_dev *pdev)
997 else if (type == PCI_EXP_TYPE_UPSTREAM || 997 else if (type == PCI_EXP_TYPE_UPSTREAM ||
998 type == PCI_EXP_TYPE_DOWNSTREAM) { 998 type == PCI_EXP_TYPE_DOWNSTREAM) {
999 parent = pci_upstream_bridge(pdev); 999 parent = pci_upstream_bridge(pdev);
1000 if (!parent->has_secondary_link) 1000
1001 /*
1002 * Usually there's an upstream device (Root Port or Switch
1003 * Downstream Port), but we can't assume one exists.
1004 */
1005 if (parent && !parent->has_secondary_link)
1001 pdev->has_secondary_link = 1; 1006 pdev->has_secondary_link = 1;
1002 } 1007 }
1003} 1008}
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index c0e6ede3e27d..6b8dd162f644 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -56,6 +56,7 @@ config PHY_EXYNOS_MIPI_VIDEO
56 56
57config PHY_PXA_28NM_HSIC 57config PHY_PXA_28NM_HSIC
58 tristate "Marvell USB HSIC 28nm PHY Driver" 58 tristate "Marvell USB HSIC 28nm PHY Driver"
59 depends on HAS_IOMEM
59 select GENERIC_PHY 60 select GENERIC_PHY
60 help 61 help
61 Enable this to support Marvell USB HSIC PHY driver for Marvell 62 Enable this to support Marvell USB HSIC PHY driver for Marvell
@@ -66,6 +67,7 @@ config PHY_PXA_28NM_HSIC
66 67
67config PHY_PXA_28NM_USB2 68config PHY_PXA_28NM_USB2
68 tristate "Marvell USB 2.0 28nm PHY Driver" 69 tristate "Marvell USB 2.0 28nm PHY Driver"
70 depends on HAS_IOMEM
69 select GENERIC_PHY 71 select GENERIC_PHY
70 help 72 help
71 Enable this to support Marvell USB 2.0 PHY driver for Marvell 73 Enable this to support Marvell USB 2.0 PHY driver for Marvell
diff --git a/drivers/phy/phy-berlin-usb.c b/drivers/phy/phy-berlin-usb.c
index c6fc95b53083..335e06d66ed9 100644
--- a/drivers/phy/phy-berlin-usb.c
+++ b/drivers/phy/phy-berlin-usb.c
@@ -105,9 +105,9 @@
105 105
106static const u32 phy_berlin_pll_dividers[] = { 106static const u32 phy_berlin_pll_dividers[] = {
107 /* Berlin 2 */ 107 /* Berlin 2 */
108 CLK_REF_DIV(0xc) | FEEDBACK_CLK_DIV(0x54),
109 /* Berlin 2CD */
110 CLK_REF_DIV(0x6) | FEEDBACK_CLK_DIV(0x55), 108 CLK_REF_DIV(0x6) | FEEDBACK_CLK_DIV(0x55),
109 /* Berlin 2CD/Q */
110 CLK_REF_DIV(0xc) | FEEDBACK_CLK_DIV(0x54),
111}; 111};
112 112
113struct phy_berlin_usb_priv { 113struct phy_berlin_usb_priv {
diff --git a/drivers/phy/phy-sun4i-usb.c b/drivers/phy/phy-sun4i-usb.c
index e17c539e4f6f..2dad7e820ff0 100644
--- a/drivers/phy/phy-sun4i-usb.c
+++ b/drivers/phy/phy-sun4i-usb.c
@@ -212,6 +212,7 @@ void sun4i_usb_phy_set_squelch_detect(struct phy *_phy, bool enabled)
212 212
213 sun4i_usb_phy_write(phy, PHY_SQUELCH_DETECT, enabled ? 0 : 2, 2); 213 sun4i_usb_phy_write(phy, PHY_SQUELCH_DETECT, enabled ? 0 : 2, 2);
214} 214}
215EXPORT_SYMBOL_GPL(sun4i_usb_phy_set_squelch_detect);
215 216
216static struct phy_ops sun4i_usb_phy_ops = { 217static struct phy_ops sun4i_usb_phy_ops = {
217 .init = sun4i_usb_phy_init, 218 .init = sun4i_usb_phy_init,
diff --git a/drivers/phy/phy-ti-pipe3.c b/drivers/phy/phy-ti-pipe3.c
index 53f295c1bab1..08020dc2c7c8 100644
--- a/drivers/phy/phy-ti-pipe3.c
+++ b/drivers/phy/phy-ti-pipe3.c
@@ -28,7 +28,8 @@
28#include <linux/delay.h> 28#include <linux/delay.h>
29#include <linux/phy/omap_control_phy.h> 29#include <linux/phy/omap_control_phy.h>
30#include <linux/of_platform.h> 30#include <linux/of_platform.h>
31#include <linux/spinlock.h> 31#include <linux/mfd/syscon.h>
32#include <linux/regmap.h>
32 33
33#define PLL_STATUS 0x00000004 34#define PLL_STATUS 0x00000004
34#define PLL_GO 0x00000008 35#define PLL_GO 0x00000008
@@ -53,6 +54,8 @@
53#define PLL_LOCK 0x2 54#define PLL_LOCK 0x2
54#define PLL_IDLE 0x1 55#define PLL_IDLE 0x1
55 56
57#define SATA_PLL_SOFT_RESET BIT(18)
58
56/* 59/*
57 * This is an Empirical value that works, need to confirm the actual 60 * This is an Empirical value that works, need to confirm the actual
58 * value required for the PIPE3PHY_PLL_CONFIGURATION2.PLL_IDLE status 61 * value required for the PIPE3PHY_PLL_CONFIGURATION2.PLL_IDLE status
@@ -83,10 +86,9 @@ struct ti_pipe3 {
83 struct clk *refclk; 86 struct clk *refclk;
84 struct clk *div_clk; 87 struct clk *div_clk;
85 struct pipe3_dpll_map *dpll_map; 88 struct pipe3_dpll_map *dpll_map;
86 bool enabled; 89 struct regmap *dpll_reset_syscon; /* ctrl. reg. acces */
87 spinlock_t lock; /* serialize clock enable/disable */ 90 unsigned int dpll_reset_reg; /* reg. index within syscon */
88 /* the below flag is needed specifically for SATA */ 91 bool sata_refclk_enabled;
89 bool refclk_enabled;
90}; 92};
91 93
92static struct pipe3_dpll_map dpll_map_usb[] = { 94static struct pipe3_dpll_map dpll_map_usb[] = {
@@ -137,6 +139,9 @@ static struct pipe3_dpll_params *ti_pipe3_get_dpll_params(struct ti_pipe3 *phy)
137 return NULL; 139 return NULL;
138} 140}
139 141
142static int ti_pipe3_enable_clocks(struct ti_pipe3 *phy);
143static void ti_pipe3_disable_clocks(struct ti_pipe3 *phy);
144
140static int ti_pipe3_power_off(struct phy *x) 145static int ti_pipe3_power_off(struct phy *x)
141{ 146{
142 struct ti_pipe3 *phy = phy_get_drvdata(x); 147 struct ti_pipe3 *phy = phy_get_drvdata(x);
@@ -217,6 +222,7 @@ static int ti_pipe3_init(struct phy *x)
217 u32 val; 222 u32 val;
218 int ret = 0; 223 int ret = 0;
219 224
225 ti_pipe3_enable_clocks(phy);
220 /* 226 /*
221 * Set pcie_pcs register to 0x96 for proper functioning of phy 227 * Set pcie_pcs register to 0x96 for proper functioning of phy
222 * as recommended in AM572x TRM SPRUHZ6, section 18.5.2.2, table 228 * as recommended in AM572x TRM SPRUHZ6, section 18.5.2.2, table
@@ -250,33 +256,46 @@ static int ti_pipe3_exit(struct phy *x)
250 u32 val; 256 u32 val;
251 unsigned long timeout; 257 unsigned long timeout;
252 258
253 /* SATA DPLL can't be powered down due to Errata i783 and PCIe 259 /* If dpll_reset_syscon is not present we wont power down SATA DPLL
254 * does not have internal DPLL 260 * due to Errata i783
255 */ 261 */
256 if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata") || 262 if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata") &&
257 of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-pcie")) 263 !phy->dpll_reset_syscon)
258 return 0; 264 return 0;
259 265
260 /* Put DPLL in IDLE mode */ 266 /* PCIe doesn't have internal DPLL */
261 val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_CONFIGURATION2); 267 if (!of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-pcie")) {
262 val |= PLL_IDLE; 268 /* Put DPLL in IDLE mode */
263 ti_pipe3_writel(phy->pll_ctrl_base, PLL_CONFIGURATION2, val); 269 val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_CONFIGURATION2);
270 val |= PLL_IDLE;
271 ti_pipe3_writel(phy->pll_ctrl_base, PLL_CONFIGURATION2, val);
264 272
265 /* wait for LDO and Oscillator to power down */ 273 /* wait for LDO and Oscillator to power down */
266 timeout = jiffies + msecs_to_jiffies(PLL_IDLE_TIME); 274 timeout = jiffies + msecs_to_jiffies(PLL_IDLE_TIME);
267 do { 275 do {
268 cpu_relax(); 276 cpu_relax();
269 val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS); 277 val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS);
270 if ((val & PLL_TICOPWDN) && (val & PLL_LDOPWDN)) 278 if ((val & PLL_TICOPWDN) && (val & PLL_LDOPWDN))
271 break; 279 break;
272 } while (!time_after(jiffies, timeout)); 280 } while (!time_after(jiffies, timeout));
281
282 if (!(val & PLL_TICOPWDN) || !(val & PLL_LDOPWDN)) {
283 dev_err(phy->dev, "Failed to power down: PLL_STATUS 0x%x\n",
284 val);
285 return -EBUSY;
286 }
287 }
273 288
274 if (!(val & PLL_TICOPWDN) || !(val & PLL_LDOPWDN)) { 289 /* i783: SATA needs control bit toggle after PLL unlock */
275 dev_err(phy->dev, "Failed to power down: PLL_STATUS 0x%x\n", 290 if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata")) {
276 val); 291 regmap_update_bits(phy->dpll_reset_syscon, phy->dpll_reset_reg,
277 return -EBUSY; 292 SATA_PLL_SOFT_RESET, SATA_PLL_SOFT_RESET);
293 regmap_update_bits(phy->dpll_reset_syscon, phy->dpll_reset_reg,
294 SATA_PLL_SOFT_RESET, 0);
278 } 295 }
279 296
297 ti_pipe3_disable_clocks(phy);
298
280 return 0; 299 return 0;
281} 300}
282static struct phy_ops ops = { 301static struct phy_ops ops = {
@@ -306,7 +325,6 @@ static int ti_pipe3_probe(struct platform_device *pdev)
306 return -ENOMEM; 325 return -ENOMEM;
307 326
308 phy->dev = &pdev->dev; 327 phy->dev = &pdev->dev;
309 spin_lock_init(&phy->lock);
310 328
311 if (!of_device_is_compatible(node, "ti,phy-pipe3-pcie")) { 329 if (!of_device_is_compatible(node, "ti,phy-pipe3-pcie")) {
312 match = of_match_device(ti_pipe3_id_table, &pdev->dev); 330 match = of_match_device(ti_pipe3_id_table, &pdev->dev);
@@ -350,6 +368,21 @@ static int ti_pipe3_probe(struct platform_device *pdev)
350 } 368 }
351 } else { 369 } else {
352 phy->wkupclk = ERR_PTR(-ENODEV); 370 phy->wkupclk = ERR_PTR(-ENODEV);
371 phy->dpll_reset_syscon = syscon_regmap_lookup_by_phandle(node,
372 "syscon-pllreset");
373 if (IS_ERR(phy->dpll_reset_syscon)) {
374 dev_info(&pdev->dev,
375 "can't get syscon-pllreset, sata dpll won't idle\n");
376 phy->dpll_reset_syscon = NULL;
377 } else {
378 if (of_property_read_u32_index(node,
379 "syscon-pllreset", 1,
380 &phy->dpll_reset_reg)) {
381 dev_err(&pdev->dev,
382 "couldn't get pllreset reg. offset\n");
383 return -EINVAL;
384 }
385 }
353 } 386 }
354 387
355 if (of_device_is_compatible(node, "ti,phy-pipe3-pcie")) { 388 if (of_device_is_compatible(node, "ti,phy-pipe3-pcie")) {
@@ -403,6 +436,16 @@ static int ti_pipe3_probe(struct platform_device *pdev)
403 platform_set_drvdata(pdev, phy); 436 platform_set_drvdata(pdev, phy);
404 pm_runtime_enable(phy->dev); 437 pm_runtime_enable(phy->dev);
405 438
439 /*
440 * Prevent auto-disable of refclk for SATA PHY due to Errata i783
441 */
442 if (of_device_is_compatible(node, "ti,phy-pipe3-sata")) {
443 if (!IS_ERR(phy->refclk)) {
444 clk_prepare_enable(phy->refclk);
445 phy->sata_refclk_enabled = true;
446 }
447 }
448
406 generic_phy = devm_phy_create(phy->dev, NULL, &ops); 449 generic_phy = devm_phy_create(phy->dev, NULL, &ops);
407 if (IS_ERR(generic_phy)) 450 if (IS_ERR(generic_phy))
408 return PTR_ERR(generic_phy); 451 return PTR_ERR(generic_phy);
@@ -413,63 +456,33 @@ static int ti_pipe3_probe(struct platform_device *pdev)
413 if (IS_ERR(phy_provider)) 456 if (IS_ERR(phy_provider))
414 return PTR_ERR(phy_provider); 457 return PTR_ERR(phy_provider);
415 458
416 pm_runtime_get(&pdev->dev);
417
418 return 0; 459 return 0;
419} 460}
420 461
421static int ti_pipe3_remove(struct platform_device *pdev) 462static int ti_pipe3_remove(struct platform_device *pdev)
422{ 463{
423 if (!pm_runtime_suspended(&pdev->dev))
424 pm_runtime_put(&pdev->dev);
425 pm_runtime_disable(&pdev->dev); 464 pm_runtime_disable(&pdev->dev);
426 465
427 return 0; 466 return 0;
428} 467}
429 468
430#ifdef CONFIG_PM 469static int ti_pipe3_enable_clocks(struct ti_pipe3 *phy)
431static int ti_pipe3_enable_refclk(struct ti_pipe3 *phy)
432{ 470{
433 if (!IS_ERR(phy->refclk) && !phy->refclk_enabled) { 471 int ret = 0;
434 int ret;
435 472
473 if (!IS_ERR(phy->refclk)) {
436 ret = clk_prepare_enable(phy->refclk); 474 ret = clk_prepare_enable(phy->refclk);
437 if (ret) { 475 if (ret) {
438 dev_err(phy->dev, "Failed to enable refclk %d\n", ret); 476 dev_err(phy->dev, "Failed to enable refclk %d\n", ret);
439 return ret; 477 return ret;
440 } 478 }
441 phy->refclk_enabled = true;
442 } 479 }
443 480
444 return 0;
445}
446
447static void ti_pipe3_disable_refclk(struct ti_pipe3 *phy)
448{
449 if (!IS_ERR(phy->refclk))
450 clk_disable_unprepare(phy->refclk);
451
452 phy->refclk_enabled = false;
453}
454
455static int ti_pipe3_enable_clocks(struct ti_pipe3 *phy)
456{
457 int ret = 0;
458 unsigned long flags;
459
460 spin_lock_irqsave(&phy->lock, flags);
461 if (phy->enabled)
462 goto err1;
463
464 ret = ti_pipe3_enable_refclk(phy);
465 if (ret)
466 goto err1;
467
468 if (!IS_ERR(phy->wkupclk)) { 481 if (!IS_ERR(phy->wkupclk)) {
469 ret = clk_prepare_enable(phy->wkupclk); 482 ret = clk_prepare_enable(phy->wkupclk);
470 if (ret) { 483 if (ret) {
471 dev_err(phy->dev, "Failed to enable wkupclk %d\n", ret); 484 dev_err(phy->dev, "Failed to enable wkupclk %d\n", ret);
472 goto err2; 485 goto disable_refclk;
473 } 486 }
474 } 487 }
475 488
@@ -477,96 +490,43 @@ static int ti_pipe3_enable_clocks(struct ti_pipe3 *phy)
477 ret = clk_prepare_enable(phy->div_clk); 490 ret = clk_prepare_enable(phy->div_clk);
478 if (ret) { 491 if (ret) {
479 dev_err(phy->dev, "Failed to enable div_clk %d\n", ret); 492 dev_err(phy->dev, "Failed to enable div_clk %d\n", ret);
480 goto err3; 493 goto disable_wkupclk;
481 } 494 }
482 } 495 }
483 496
484 phy->enabled = true;
485 spin_unlock_irqrestore(&phy->lock, flags);
486 return 0; 497 return 0;
487 498
488err3: 499disable_wkupclk:
489 if (!IS_ERR(phy->wkupclk)) 500 if (!IS_ERR(phy->wkupclk))
490 clk_disable_unprepare(phy->wkupclk); 501 clk_disable_unprepare(phy->wkupclk);
491 502
492err2: 503disable_refclk:
493 if (!IS_ERR(phy->refclk)) 504 if (!IS_ERR(phy->refclk))
494 clk_disable_unprepare(phy->refclk); 505 clk_disable_unprepare(phy->refclk);
495 506
496 ti_pipe3_disable_refclk(phy);
497err1:
498 spin_unlock_irqrestore(&phy->lock, flags);
499 return ret; 507 return ret;
500} 508}
501 509
502static void ti_pipe3_disable_clocks(struct ti_pipe3 *phy) 510static void ti_pipe3_disable_clocks(struct ti_pipe3 *phy)
503{ 511{
504 unsigned long flags;
505
506 spin_lock_irqsave(&phy->lock, flags);
507 if (!phy->enabled) {
508 spin_unlock_irqrestore(&phy->lock, flags);
509 return;
510 }
511
512 if (!IS_ERR(phy->wkupclk)) 512 if (!IS_ERR(phy->wkupclk))
513 clk_disable_unprepare(phy->wkupclk); 513 clk_disable_unprepare(phy->wkupclk);
514 /* Don't disable refclk for SATA PHY due to Errata i783 */ 514 if (!IS_ERR(phy->refclk)) {
515 if (!of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata")) 515 clk_disable_unprepare(phy->refclk);
516 ti_pipe3_disable_refclk(phy); 516 /*
517 * SATA refclk needs an additional disable as we left it
518 * on in probe to avoid Errata i783
519 */
520 if (phy->sata_refclk_enabled) {
521 clk_disable_unprepare(phy->refclk);
522 phy->sata_refclk_enabled = false;
523 }
524 }
525
517 if (!IS_ERR(phy->div_clk)) 526 if (!IS_ERR(phy->div_clk))
518 clk_disable_unprepare(phy->div_clk); 527 clk_disable_unprepare(phy->div_clk);
519 phy->enabled = false;
520 spin_unlock_irqrestore(&phy->lock, flags);
521} 528}
522 529
523static int ti_pipe3_runtime_suspend(struct device *dev)
524{
525 struct ti_pipe3 *phy = dev_get_drvdata(dev);
526
527 ti_pipe3_disable_clocks(phy);
528 return 0;
529}
530
531static int ti_pipe3_runtime_resume(struct device *dev)
532{
533 struct ti_pipe3 *phy = dev_get_drvdata(dev);
534 int ret = 0;
535
536 ret = ti_pipe3_enable_clocks(phy);
537 return ret;
538}
539
540static int ti_pipe3_suspend(struct device *dev)
541{
542 struct ti_pipe3 *phy = dev_get_drvdata(dev);
543
544 ti_pipe3_disable_clocks(phy);
545 return 0;
546}
547
548static int ti_pipe3_resume(struct device *dev)
549{
550 struct ti_pipe3 *phy = dev_get_drvdata(dev);
551 int ret;
552
553 ret = ti_pipe3_enable_clocks(phy);
554 if (ret)
555 return ret;
556
557 pm_runtime_disable(dev);
558 pm_runtime_set_active(dev);
559 pm_runtime_enable(dev);
560 return 0;
561}
562#endif
563
564static const struct dev_pm_ops ti_pipe3_pm_ops = {
565 SET_RUNTIME_PM_OPS(ti_pipe3_runtime_suspend,
566 ti_pipe3_runtime_resume, NULL)
567 SET_SYSTEM_SLEEP_PM_OPS(ti_pipe3_suspend, ti_pipe3_resume)
568};
569
570static const struct of_device_id ti_pipe3_id_table[] = { 530static const struct of_device_id ti_pipe3_id_table[] = {
571 { 531 {
572 .compatible = "ti,phy-usb3", 532 .compatible = "ti,phy-usb3",
@@ -592,7 +552,6 @@ static struct platform_driver ti_pipe3_driver = {
592 .remove = ti_pipe3_remove, 552 .remove = ti_pipe3_remove,
593 .driver = { 553 .driver = {
594 .name = "ti-pipe3", 554 .name = "ti-pipe3",
595 .pm = &ti_pipe3_pm_ops,
596 .of_match_table = ti_pipe3_id_table, 555 .of_match_table = ti_pipe3_id_table,
597 }, 556 },
598}; 557};
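
The ti-pipe3 rework reads both the control-module regmap and a register offset from a single "syscon-pllreset" property, then toggles the SATA PLL soft-reset bit after PLL unlock (Errata i783). A reduced sketch of that lookup-and-pulse sequence follows; the bit definition mirrors SATA_PLL_SOFT_RESET from the hunk and the error handling is simplified.

	#include <linux/bitops.h>
	#include <linux/err.h>
	#include <linux/of.h>
	#include <linux/mfd/syscon.h>
	#include <linux/regmap.h>

	#define EXAMPLE_PLL_SOFT_RESET	BIT(18)	/* mirrors SATA_PLL_SOFT_RESET */

	static int example_sata_dpll_reset(struct device_node *np)
	{
		struct regmap *syscon;
		u32 reg;
		int ret;

		syscon = syscon_regmap_lookup_by_phandle(np, "syscon-pllreset");
		if (IS_ERR(syscon))
			return PTR_ERR(syscon);

		/* Second cell of the property carries the register offset. */
		ret = of_property_read_u32_index(np, "syscon-pllreset", 1, &reg);
		if (ret)
			return ret;

		/* Pulse the SATA PLL soft reset. */
		regmap_update_bits(syscon, reg, EXAMPLE_PLL_SOFT_RESET,
				   EXAMPLE_PLL_SOFT_RESET);
		regmap_update_bits(syscon, reg, EXAMPLE_PLL_SOFT_RESET, 0);

		return 0;
	}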
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index efcf2a2b3975..6177315ab74e 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -473,6 +473,8 @@ static void bcm2835_gpio_irq_disable(struct irq_data *data)
473 473
474 spin_lock_irqsave(&pc->irq_lock[bank], flags); 474 spin_lock_irqsave(&pc->irq_lock[bank], flags);
475 bcm2835_gpio_irq_config(pc, gpio, false); 475 bcm2835_gpio_irq_config(pc, gpio, false);
476 /* Clear events that were latched prior to clearing event sources */
477 bcm2835_gpio_set_bit(pc, GPEDS0, gpio);
476 clear_bit(offset, &pc->enabled_irq_map[bank]); 478 clear_bit(offset, &pc->enabled_irq_map[bank]);
477 spin_unlock_irqrestore(&pc->irq_lock[bank], flags); 479 spin_unlock_irqrestore(&pc->irq_lock[bank], flags);
478} 480}
diff --git a/drivers/pinctrl/freescale/pinctrl-imx1-core.c b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
index 5fd4437cee15..88a7fac11bd4 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx1-core.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
@@ -403,14 +403,13 @@ static int imx1_pinconf_set(struct pinctrl_dev *pctldev,
403 unsigned num_configs) 403 unsigned num_configs)
404{ 404{
405 struct imx1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev); 405 struct imx1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
406 const struct imx1_pinctrl_soc_info *info = ipctl->info;
407 int i; 406 int i;
408 407
409 for (i = 0; i != num_configs; ++i) { 408 for (i = 0; i != num_configs; ++i) {
410 imx1_write_bit(ipctl, pin_id, configs[i] & 0x01, MX1_PUEN); 409 imx1_write_bit(ipctl, pin_id, configs[i] & 0x01, MX1_PUEN);
411 410
412 dev_dbg(ipctl->dev, "pinconf set pullup pin %s\n", 411 dev_dbg(ipctl->dev, "pinconf set pullup pin %s\n",
413 info->pins[pin_id].name); 412 pin_desc_get(pctldev, pin_id)->name);
414 } 413 }
415 414
416 return 0; 415 return 0;
diff --git a/drivers/pinctrl/nomadik/pinctrl-abx500.c b/drivers/pinctrl/nomadik/pinctrl-abx500.c
index 557d0f2a3031..97681fac082e 100644
--- a/drivers/pinctrl/nomadik/pinctrl-abx500.c
+++ b/drivers/pinctrl/nomadik/pinctrl-abx500.c
@@ -787,7 +787,6 @@ static const struct pinmux_ops abx500_pinmux_ops = {
787 .set_mux = abx500_pmx_set, 787 .set_mux = abx500_pmx_set,
788 .gpio_request_enable = abx500_gpio_request_enable, 788 .gpio_request_enable = abx500_gpio_request_enable,
789 .gpio_disable_free = abx500_gpio_disable_free, 789 .gpio_disable_free = abx500_gpio_disable_free,
790 .strict = true,
791}; 790};
792 791
793static int abx500_get_groups_cnt(struct pinctrl_dev *pctldev) 792static int abx500_get_groups_cnt(struct pinctrl_dev *pctldev)
diff --git a/drivers/pinctrl/pinctrl-lpc18xx.c b/drivers/pinctrl/pinctrl-lpc18xx.c
index ef0b697639a7..347c763a6a78 100644
--- a/drivers/pinctrl/pinctrl-lpc18xx.c
+++ b/drivers/pinctrl/pinctrl-lpc18xx.c
@@ -823,7 +823,7 @@ static int lpc18xx_pconf_set_i2c0(struct pinctrl_dev *pctldev,
823 break; 823 break;
824 824
825 case PIN_CONFIG_INPUT_SCHMITT_ENABLE: 825 case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
826 if (param) 826 if (param_val)
827 *reg &= ~(LPC18XX_SCU_I2C0_ZIF << shift); 827 *reg &= ~(LPC18XX_SCU_I2C0_ZIF << shift);
828 else 828 else
829 *reg |= (LPC18XX_SCU_I2C0_ZIF << shift); 829 *reg |= (LPC18XX_SCU_I2C0_ZIF << shift);
@@ -876,7 +876,7 @@ static int lpc18xx_pconf_set_pin(struct pinctrl_dev *pctldev,
876 break; 876 break;
877 877
878 case PIN_CONFIG_INPUT_SCHMITT_ENABLE: 878 case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
879 if (param) 879 if (param_val)
880 *reg &= ~LPC18XX_SCU_PIN_ZIF; 880 *reg &= ~LPC18XX_SCU_PIN_ZIF;
881 else 881 else
882 *reg |= LPC18XX_SCU_PIN_ZIF; 882 *reg |= LPC18XX_SCU_PIN_ZIF;
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index b2de09d3b1a0..0b8d480171a3 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -1760,7 +1760,8 @@ static int pcs_irq_init_chained_handler(struct pcs_device *pcs,
1760 int res; 1760 int res;
1761 1761
1762 res = request_irq(pcs_soc->irq, pcs_irq_handler, 1762 res = request_irq(pcs_soc->irq, pcs_irq_handler,
1763 IRQF_SHARED | IRQF_NO_SUSPEND, 1763 IRQF_SHARED | IRQF_NO_SUSPEND |
1764 IRQF_NO_THREAD,
1764 name, pcs_soc); 1765 name, pcs_soc);
1765 if (res) { 1766 if (res) {
1766 pcs_soc->irq = -1; 1767 pcs_soc->irq = -1;
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index 3dd5a3b2ac62..c760bf43d116 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -33,11 +33,6 @@
33#include "../core.h" 33#include "../core.h"
34#include "pinctrl-samsung.h" 34#include "pinctrl-samsung.h"
35 35
36#define GROUP_SUFFIX "-grp"
37#define GSUFFIX_LEN sizeof(GROUP_SUFFIX)
38#define FUNCTION_SUFFIX "-mux"
39#define FSUFFIX_LEN sizeof(FUNCTION_SUFFIX)
40
41/* list of all possible config options supported */ 36/* list of all possible config options supported */
42static struct pin_config { 37static struct pin_config {
43 const char *property; 38 const char *property;
diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h
index c7508d5f6886..0874cfee6889 100644
--- a/drivers/pinctrl/sh-pfc/sh_pfc.h
+++ b/drivers/pinctrl/sh-pfc/sh_pfc.h
@@ -224,7 +224,7 @@ struct sh_pfc_soc_info {
224 224
225/* PINMUX_GPIO_GP_ALL - Expand to a list of sh_pfc_pin entries */ 225/* PINMUX_GPIO_GP_ALL - Expand to a list of sh_pfc_pin entries */
226#define _GP_GPIO(bank, _pin, _name, sfx) \ 226#define _GP_GPIO(bank, _pin, _name, sfx) \
227 [(bank * 32) + _pin] = { \ 227 { \
228 .pin = (bank * 32) + _pin, \ 228 .pin = (bank * 32) + _pin, \
229 .name = __stringify(_name), \ 229 .name = __stringify(_name), \
230 .enum_id = _name##_DATA, \ 230 .enum_id = _name##_DATA, \
diff --git a/drivers/pinctrl/spear/pinctrl-spear.c b/drivers/pinctrl/spear/pinctrl-spear.c
index f87a5eaf75da..0afaf79a4e51 100644
--- a/drivers/pinctrl/spear/pinctrl-spear.c
+++ b/drivers/pinctrl/spear/pinctrl-spear.c
@@ -2,7 +2,7 @@
2 * Driver for the ST Microelectronics SPEAr pinmux 2 * Driver for the ST Microelectronics SPEAr pinmux
3 * 3 *
4 * Copyright (C) 2012 ST Microelectronics 4 * Copyright (C) 2012 ST Microelectronics
5 * Viresh Kumar <viresh.linux@gmail.com> 5 * Viresh Kumar <vireshk@kernel.org>
6 * 6 *
7 * Inspired from: 7 * Inspired from:
8 * - U300 Pinctl drivers 8 * - U300 Pinctl drivers
diff --git a/drivers/pinctrl/spear/pinctrl-spear.h b/drivers/pinctrl/spear/pinctrl-spear.h
index dc8bf85ecb2a..27c2cc8d83ad 100644
--- a/drivers/pinctrl/spear/pinctrl-spear.h
+++ b/drivers/pinctrl/spear/pinctrl-spear.h
@@ -2,7 +2,7 @@
2 * Driver header file for the ST Microelectronics SPEAr pinmux 2 * Driver header file for the ST Microelectronics SPEAr pinmux
3 * 3 *
4 * Copyright (C) 2012 ST Microelectronics 4 * Copyright (C) 2012 ST Microelectronics
5 * Viresh Kumar <viresh.linux@gmail.com> 5 * Viresh Kumar <vireshk@kernel.org>
6 * 6 *
7 * This file is licensed under the terms of the GNU General Public 7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any 8 * License version 2. This program is licensed "as is" without any
diff --git a/drivers/pinctrl/spear/pinctrl-spear1310.c b/drivers/pinctrl/spear/pinctrl-spear1310.c
index a7bdc537efa7..92611bb757ac 100644
--- a/drivers/pinctrl/spear/pinctrl-spear1310.c
+++ b/drivers/pinctrl/spear/pinctrl-spear1310.c
@@ -2,7 +2,7 @@
2 * Driver for the ST Microelectronics SPEAr1310 pinmux 2 * Driver for the ST Microelectronics SPEAr1310 pinmux
3 * 3 *
4 * Copyright (C) 2012 ST Microelectronics 4 * Copyright (C) 2012 ST Microelectronics
5 * Viresh Kumar <viresh.linux@gmail.com> 5 * Viresh Kumar <vireshk@kernel.org>
6 * 6 *
7 * This file is licensed under the terms of the GNU General Public 7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any 8 * License version 2. This program is licensed "as is" without any
@@ -2730,7 +2730,7 @@ static void __exit spear1310_pinctrl_exit(void)
2730} 2730}
2731module_exit(spear1310_pinctrl_exit); 2731module_exit(spear1310_pinctrl_exit);
2732 2732
2733MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>"); 2733MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");
2734MODULE_DESCRIPTION("ST Microelectronics SPEAr1310 pinctrl driver"); 2734MODULE_DESCRIPTION("ST Microelectronics SPEAr1310 pinctrl driver");
2735MODULE_LICENSE("GPL v2"); 2735MODULE_LICENSE("GPL v2");
2736MODULE_DEVICE_TABLE(of, spear1310_pinctrl_of_match); 2736MODULE_DEVICE_TABLE(of, spear1310_pinctrl_of_match);
diff --git a/drivers/pinctrl/spear/pinctrl-spear1340.c b/drivers/pinctrl/spear/pinctrl-spear1340.c
index f43ec85a0328..f842e9dc40d0 100644
--- a/drivers/pinctrl/spear/pinctrl-spear1340.c
+++ b/drivers/pinctrl/spear/pinctrl-spear1340.c
@@ -2,7 +2,7 @@
2 * Driver for the ST Microelectronics SPEAr1340 pinmux 2 * Driver for the ST Microelectronics SPEAr1340 pinmux
3 * 3 *
4 * Copyright (C) 2012 ST Microelectronics 4 * Copyright (C) 2012 ST Microelectronics
5 * Viresh Kumar <viresh.linux@gmail.com> 5 * Viresh Kumar <vireshk@kernel.org>
6 * 6 *
7 * This file is licensed under the terms of the GNU General Public 7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any 8 * License version 2. This program is licensed "as is" without any
@@ -2046,7 +2046,7 @@ static void __exit spear1340_pinctrl_exit(void)
2046} 2046}
2047module_exit(spear1340_pinctrl_exit); 2047module_exit(spear1340_pinctrl_exit);
2048 2048
2049MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>"); 2049MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");
2050MODULE_DESCRIPTION("ST Microelectronics SPEAr1340 pinctrl driver"); 2050MODULE_DESCRIPTION("ST Microelectronics SPEAr1340 pinctrl driver");
2051MODULE_LICENSE("GPL v2"); 2051MODULE_LICENSE("GPL v2");
2052MODULE_DEVICE_TABLE(of, spear1340_pinctrl_of_match); 2052MODULE_DEVICE_TABLE(of, spear1340_pinctrl_of_match);
diff --git a/drivers/pinctrl/spear/pinctrl-spear300.c b/drivers/pinctrl/spear/pinctrl-spear300.c
index da8990a8eeef..d998a2ccff48 100644
--- a/drivers/pinctrl/spear/pinctrl-spear300.c
+++ b/drivers/pinctrl/spear/pinctrl-spear300.c
@@ -2,7 +2,7 @@
2 * Driver for the ST Microelectronics SPEAr300 pinmux 2 * Driver for the ST Microelectronics SPEAr300 pinmux
3 * 3 *
4 * Copyright (C) 2012 ST Microelectronics 4 * Copyright (C) 2012 ST Microelectronics
5 * Viresh Kumar <viresh.linux@gmail.com> 5 * Viresh Kumar <vireshk@kernel.org>
6 * 6 *
7 * This file is licensed under the terms of the GNU General Public 7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any 8 * License version 2. This program is licensed "as is" without any
@@ -703,7 +703,7 @@ static void __exit spear300_pinctrl_exit(void)
703} 703}
704module_exit(spear300_pinctrl_exit); 704module_exit(spear300_pinctrl_exit);
705 705
706MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>"); 706MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");
707MODULE_DESCRIPTION("ST Microelectronics SPEAr300 pinctrl driver"); 707MODULE_DESCRIPTION("ST Microelectronics SPEAr300 pinctrl driver");
708MODULE_LICENSE("GPL v2"); 708MODULE_LICENSE("GPL v2");
709MODULE_DEVICE_TABLE(of, spear300_pinctrl_of_match); 709MODULE_DEVICE_TABLE(of, spear300_pinctrl_of_match);
diff --git a/drivers/pinctrl/spear/pinctrl-spear310.c b/drivers/pinctrl/spear/pinctrl-spear310.c
index 31ede51e819b..609b18aceb16 100644
--- a/drivers/pinctrl/spear/pinctrl-spear310.c
+++ b/drivers/pinctrl/spear/pinctrl-spear310.c
@@ -2,7 +2,7 @@
2 * Driver for the ST Microelectronics SPEAr310 pinmux 2 * Driver for the ST Microelectronics SPEAr310 pinmux
3 * 3 *
4 * Copyright (C) 2012 ST Microelectronics 4 * Copyright (C) 2012 ST Microelectronics
5 * Viresh Kumar <viresh.linux@gmail.com> 5 * Viresh Kumar <vireshk@kernel.org>
6 * 6 *
7 * This file is licensed under the terms of the GNU General Public 7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any 8 * License version 2. This program is licensed "as is" without any
@@ -426,7 +426,7 @@ static void __exit spear310_pinctrl_exit(void)
426} 426}
427module_exit(spear310_pinctrl_exit); 427module_exit(spear310_pinctrl_exit);
428 428
429MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>"); 429MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");
430MODULE_DESCRIPTION("ST Microelectronics SPEAr310 pinctrl driver"); 430MODULE_DESCRIPTION("ST Microelectronics SPEAr310 pinctrl driver");
431MODULE_LICENSE("GPL v2"); 431MODULE_LICENSE("GPL v2");
432MODULE_DEVICE_TABLE(of, spear310_pinctrl_of_match); 432MODULE_DEVICE_TABLE(of, spear310_pinctrl_of_match);
diff --git a/drivers/pinctrl/spear/pinctrl-spear320.c b/drivers/pinctrl/spear/pinctrl-spear320.c
index 506e40b641e0..c07114431bd4 100644
--- a/drivers/pinctrl/spear/pinctrl-spear320.c
+++ b/drivers/pinctrl/spear/pinctrl-spear320.c
@@ -2,7 +2,7 @@
2 * Driver for the ST Microelectronics SPEAr320 pinmux 2 * Driver for the ST Microelectronics SPEAr320 pinmux
3 * 3 *
4 * Copyright (C) 2012 ST Microelectronics 4 * Copyright (C) 2012 ST Microelectronics
5 * Viresh Kumar <viresh.linux@gmail.com> 5 * Viresh Kumar <vireshk@kernel.org>
6 * 6 *
7 * This file is licensed under the terms of the GNU General Public 7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any 8 * License version 2. This program is licensed "as is" without any
@@ -3467,7 +3467,7 @@ static void __exit spear320_pinctrl_exit(void)
3467} 3467}
3468module_exit(spear320_pinctrl_exit); 3468module_exit(spear320_pinctrl_exit);
3469 3469
3470MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>"); 3470MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");
3471MODULE_DESCRIPTION("ST Microelectronics SPEAr320 pinctrl driver"); 3471MODULE_DESCRIPTION("ST Microelectronics SPEAr320 pinctrl driver");
3472MODULE_LICENSE("GPL v2"); 3472MODULE_LICENSE("GPL v2");
3473MODULE_DEVICE_TABLE(of, spear320_pinctrl_of_match); 3473MODULE_DEVICE_TABLE(of, spear320_pinctrl_of_match);
diff --git a/drivers/pinctrl/spear/pinctrl-spear3xx.c b/drivers/pinctrl/spear/pinctrl-spear3xx.c
index 12ee21af766b..d3119aafe709 100644
--- a/drivers/pinctrl/spear/pinctrl-spear3xx.c
+++ b/drivers/pinctrl/spear/pinctrl-spear3xx.c
@@ -2,7 +2,7 @@
2 * Driver for the ST Microelectronics SPEAr3xx pinmux 2 * Driver for the ST Microelectronics SPEAr3xx pinmux
3 * 3 *
4 * Copyright (C) 2012 ST Microelectronics 4 * Copyright (C) 2012 ST Microelectronics
5 * Viresh Kumar <viresh.linux@gmail.com> 5 * Viresh Kumar <vireshk@kernel.org>
6 * 6 *
7 * This file is licensed under the terms of the GNU General Public 7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any 8 * License version 2. This program is licensed "as is" without any
diff --git a/drivers/pinctrl/spear/pinctrl-spear3xx.h b/drivers/pinctrl/spear/pinctrl-spear3xx.h
index 7860b36053c4..ce19dcf8f08b 100644
--- a/drivers/pinctrl/spear/pinctrl-spear3xx.h
+++ b/drivers/pinctrl/spear/pinctrl-spear3xx.h
@@ -2,7 +2,7 @@
2 * Header file for the ST Microelectronics SPEAr3xx pinmux 2 * Header file for the ST Microelectronics SPEAr3xx pinmux
3 * 3 *
4 * Copyright (C) 2012 ST Microelectronics 4 * Copyright (C) 2012 ST Microelectronics
5 * Viresh Kumar <viresh.linux@gmail.com> 5 * Viresh Kumar <vireshk@kernel.org>
6 * 6 *
7 * This file is licensed under the terms of the GNU General Public 7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any 8 * License version 2. This program is licensed "as is" without any
diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig
index cb1329919527..3271cd1abe7c 100644
--- a/drivers/platform/chrome/Kconfig
+++ b/drivers/platform/chrome/Kconfig
@@ -4,7 +4,6 @@
4 4
5menuconfig CHROME_PLATFORMS 5menuconfig CHROME_PLATFORMS
6 bool "Platform support for Chrome hardware" 6 bool "Platform support for Chrome hardware"
7 depends on X86 || ARM
8 ---help--- 7 ---help---
9 Say Y here to get to see options for platform support for 8 Say Y here to get to see options for platform support for
10 various Chromebooks and Chromeboxes. This option alone does 9 various Chromebooks and Chromeboxes. This option alone does
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index ed317ccac4a2..aaeeae81e3a9 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -309,12 +309,15 @@ static const struct dmi_system_id dell_quirks[] __initconst = {
309static struct calling_interface_buffer *buffer; 309static struct calling_interface_buffer *buffer;
310static DEFINE_MUTEX(buffer_mutex); 310static DEFINE_MUTEX(buffer_mutex);
311 311
312static int hwswitch_state; 312static void clear_buffer(void)
313{
314 memset(buffer, 0, sizeof(struct calling_interface_buffer));
315}
313 316
314static void get_buffer(void) 317static void get_buffer(void)
315{ 318{
316 mutex_lock(&buffer_mutex); 319 mutex_lock(&buffer_mutex);
317 memset(buffer, 0, sizeof(struct calling_interface_buffer)); 320 clear_buffer();
318} 321}
319 322
320static void release_buffer(void) 323static void release_buffer(void)
@@ -548,21 +551,41 @@ static int dell_rfkill_set(void *data, bool blocked)
548 int disable = blocked ? 1 : 0; 551 int disable = blocked ? 1 : 0;
549 unsigned long radio = (unsigned long)data; 552 unsigned long radio = (unsigned long)data;
550 int hwswitch_bit = (unsigned long)data - 1; 553 int hwswitch_bit = (unsigned long)data - 1;
554 int hwswitch;
555 int status;
556 int ret;
551 557
552 get_buffer(); 558 get_buffer();
559
560 dell_send_request(buffer, 17, 11);
561 ret = buffer->output[0];
562 status = buffer->output[1];
563
564 if (ret != 0)
565 goto out;
566
567 clear_buffer();
568
569 buffer->input[0] = 0x2;
553 dell_send_request(buffer, 17, 11); 570 dell_send_request(buffer, 17, 11);
571 ret = buffer->output[0];
572 hwswitch = buffer->output[1];
554 573
555 /* If the hardware switch controls this radio, and the hardware 574 /* If the hardware switch controls this radio, and the hardware
556 switch is disabled, always disable the radio */ 575 switch is disabled, always disable the radio */
557 if ((hwswitch_state & BIT(hwswitch_bit)) && 576 if (ret == 0 && (hwswitch & BIT(hwswitch_bit)) &&
558 !(buffer->output[1] & BIT(16))) 577 (status & BIT(0)) && !(status & BIT(16)))
559 disable = 1; 578 disable = 1;
560 579
580 clear_buffer();
581
561 buffer->input[0] = (1 | (radio<<8) | (disable << 16)); 582 buffer->input[0] = (1 | (radio<<8) | (disable << 16));
562 dell_send_request(buffer, 17, 11); 583 dell_send_request(buffer, 17, 11);
584 ret = buffer->output[0];
563 585
586 out:
564 release_buffer(); 587 release_buffer();
565 return 0; 588 return dell_smi_error(ret);
566} 589}
567 590
568/* Must be called with the buffer held */ 591/* Must be called with the buffer held */
@@ -572,6 +595,7 @@ static void dell_rfkill_update_sw_state(struct rfkill *rfkill, int radio,
572 if (status & BIT(0)) { 595 if (status & BIT(0)) {
573 /* Has hw-switch, sync sw_state to BIOS */ 596 /* Has hw-switch, sync sw_state to BIOS */
574 int block = rfkill_blocked(rfkill); 597 int block = rfkill_blocked(rfkill);
598 clear_buffer();
575 buffer->input[0] = (1 | (radio << 8) | (block << 16)); 599 buffer->input[0] = (1 | (radio << 8) | (block << 16));
576 dell_send_request(buffer, 17, 11); 600 dell_send_request(buffer, 17, 11);
577 } else { 601 } else {
@@ -581,23 +605,43 @@ static void dell_rfkill_update_sw_state(struct rfkill *rfkill, int radio,
581} 605}
582 606
583static void dell_rfkill_update_hw_state(struct rfkill *rfkill, int radio, 607static void dell_rfkill_update_hw_state(struct rfkill *rfkill, int radio,
584 int status) 608 int status, int hwswitch)
585{ 609{
586 if (hwswitch_state & (BIT(radio - 1))) 610 if (hwswitch & (BIT(radio - 1)))
587 rfkill_set_hw_state(rfkill, !(status & BIT(16))); 611 rfkill_set_hw_state(rfkill, !(status & BIT(16)));
588} 612}
589 613
590static void dell_rfkill_query(struct rfkill *rfkill, void *data) 614static void dell_rfkill_query(struct rfkill *rfkill, void *data)
591{ 615{
616 int radio = ((unsigned long)data & 0xF);
617 int hwswitch;
592 int status; 618 int status;
619 int ret;
593 620
594 get_buffer(); 621 get_buffer();
622
595 dell_send_request(buffer, 17, 11); 623 dell_send_request(buffer, 17, 11);
624 ret = buffer->output[0];
596 status = buffer->output[1]; 625 status = buffer->output[1];
597 626
598 dell_rfkill_update_hw_state(rfkill, (unsigned long)data, status); 627 if (ret != 0 || !(status & BIT(0))) {
628 release_buffer();
629 return;
630 }
631
632 clear_buffer();
633
634 buffer->input[0] = 0x2;
635 dell_send_request(buffer, 17, 11);
636 ret = buffer->output[0];
637 hwswitch = buffer->output[1];
599 638
600 release_buffer(); 639 release_buffer();
640
641 if (ret != 0)
642 return;
643
644 dell_rfkill_update_hw_state(rfkill, radio, status, hwswitch);
601} 645}
602 646
603static const struct rfkill_ops dell_rfkill_ops = { 647static const struct rfkill_ops dell_rfkill_ops = {
@@ -609,13 +653,27 @@ static struct dentry *dell_laptop_dir;
609 653
610static int dell_debugfs_show(struct seq_file *s, void *data) 654static int dell_debugfs_show(struct seq_file *s, void *data)
611{ 655{
656 int hwswitch_state;
657 int hwswitch_ret;
612 int status; 658 int status;
659 int ret;
613 660
614 get_buffer(); 661 get_buffer();
662
615 dell_send_request(buffer, 17, 11); 663 dell_send_request(buffer, 17, 11);
664 ret = buffer->output[0];
616 status = buffer->output[1]; 665 status = buffer->output[1];
666
667 clear_buffer();
668
669 buffer->input[0] = 0x2;
670 dell_send_request(buffer, 17, 11);
671 hwswitch_ret = buffer->output[0];
672 hwswitch_state = buffer->output[1];
673
617 release_buffer(); 674 release_buffer();
618 675
676 seq_printf(s, "return:\t%d\n", ret);
619 seq_printf(s, "status:\t0x%X\n", status); 677 seq_printf(s, "status:\t0x%X\n", status);
620 seq_printf(s, "Bit 0 : Hardware switch supported: %lu\n", 678 seq_printf(s, "Bit 0 : Hardware switch supported: %lu\n",
621 status & BIT(0)); 679 status & BIT(0));
@@ -657,7 +715,8 @@ static int dell_debugfs_show(struct seq_file *s, void *data)
657 seq_printf(s, "Bit 21: WiGig is blocked: %lu\n", 715 seq_printf(s, "Bit 21: WiGig is blocked: %lu\n",
658 (status & BIT(21)) >> 21); 716 (status & BIT(21)) >> 21);
659 717
660 seq_printf(s, "\nhwswitch_state:\t0x%X\n", hwswitch_state); 718 seq_printf(s, "\nhwswitch_return:\t%d\n", hwswitch_ret);
719 seq_printf(s, "hwswitch_state:\t0x%X\n", hwswitch_state);
661 seq_printf(s, "Bit 0 : Wifi controlled by switch: %lu\n", 720 seq_printf(s, "Bit 0 : Wifi controlled by switch: %lu\n",
662 hwswitch_state & BIT(0)); 721 hwswitch_state & BIT(0));
663 seq_printf(s, "Bit 1 : Bluetooth controlled by switch: %lu\n", 722 seq_printf(s, "Bit 1 : Bluetooth controlled by switch: %lu\n",
@@ -693,25 +752,43 @@ static const struct file_operations dell_debugfs_fops = {
693 752
694static void dell_update_rfkill(struct work_struct *ignored) 753static void dell_update_rfkill(struct work_struct *ignored)
695{ 754{
755 int hwswitch = 0;
696 int status; 756 int status;
757 int ret;
697 758
698 get_buffer(); 759 get_buffer();
760
699 dell_send_request(buffer, 17, 11); 761 dell_send_request(buffer, 17, 11);
762 ret = buffer->output[0];
700 status = buffer->output[1]; 763 status = buffer->output[1];
701 764
765 if (ret != 0)
766 goto out;
767
768 clear_buffer();
769
770 buffer->input[0] = 0x2;
771 dell_send_request(buffer, 17, 11);
772 ret = buffer->output[0];
773
774 if (ret == 0 && (status & BIT(0)))
775 hwswitch = buffer->output[1];
776
702 if (wifi_rfkill) { 777 if (wifi_rfkill) {
703 dell_rfkill_update_hw_state(wifi_rfkill, 1, status); 778 dell_rfkill_update_hw_state(wifi_rfkill, 1, status, hwswitch);
704 dell_rfkill_update_sw_state(wifi_rfkill, 1, status); 779 dell_rfkill_update_sw_state(wifi_rfkill, 1, status);
705 } 780 }
706 if (bluetooth_rfkill) { 781 if (bluetooth_rfkill) {
707 dell_rfkill_update_hw_state(bluetooth_rfkill, 2, status); 782 dell_rfkill_update_hw_state(bluetooth_rfkill, 2, status,
783 hwswitch);
708 dell_rfkill_update_sw_state(bluetooth_rfkill, 2, status); 784 dell_rfkill_update_sw_state(bluetooth_rfkill, 2, status);
709 } 785 }
710 if (wwan_rfkill) { 786 if (wwan_rfkill) {
711 dell_rfkill_update_hw_state(wwan_rfkill, 3, status); 787 dell_rfkill_update_hw_state(wwan_rfkill, 3, status, hwswitch);
712 dell_rfkill_update_sw_state(wwan_rfkill, 3, status); 788 dell_rfkill_update_sw_state(wwan_rfkill, 3, status);
713 } 789 }
714 790
791 out:
715 release_buffer(); 792 release_buffer();
716} 793}
717static DECLARE_DELAYED_WORK(dell_rfkill_work, dell_update_rfkill); 794static DECLARE_DELAYED_WORK(dell_rfkill_work, dell_update_rfkill);
@@ -773,21 +850,17 @@ static int __init dell_setup_rfkill(void)
773 850
774 get_buffer(); 851 get_buffer();
775 dell_send_request(buffer, 17, 11); 852 dell_send_request(buffer, 17, 11);
853 ret = buffer->output[0];
776 status = buffer->output[1]; 854 status = buffer->output[1];
777 buffer->input[0] = 0x2;
778 dell_send_request(buffer, 17, 11);
779 hwswitch_state = buffer->output[1];
780 release_buffer(); 855 release_buffer();
781 856
782 if (!(status & BIT(0))) { 857 /* dell wireless info smbios call is not supported */
783 if (force_rfkill) { 858 if (ret != 0)
784 /* No hwsitch, clear all hw-controlled bits */ 859 return 0;
785 hwswitch_state &= ~7; 860
786 } else { 861 /* rfkill is only tested on laptops with a hwswitch */
787 /* rfkill is only tested on laptops with a hwswitch */ 862 if (!(status & BIT(0)) && !force_rfkill)
788 return 0; 863 return 0;
789 }
790 }
791 864
792 if ((status & (1<<2|1<<8)) == (1<<2|1<<8)) { 865 if ((status & (1<<2|1<<8)) == (1<<2|1<<8)) {
793 wifi_rfkill = rfkill_alloc("dell-wifi", &platform_device->dev, 866 wifi_rfkill = rfkill_alloc("dell-wifi", &platform_device->dev,
@@ -932,47 +1005,50 @@ static void dell_cleanup_rfkill(void)
932 1005
933static int dell_send_intensity(struct backlight_device *bd) 1006static int dell_send_intensity(struct backlight_device *bd)
934{ 1007{
935 int ret = 0; 1008 int token;
1009 int ret;
1010
1011 token = find_token_location(BRIGHTNESS_TOKEN);
1012 if (token == -1)
1013 return -ENODEV;
936 1014
937 get_buffer(); 1015 get_buffer();
938 buffer->input[0] = find_token_location(BRIGHTNESS_TOKEN); 1016 buffer->input[0] = token;
939 buffer->input[1] = bd->props.brightness; 1017 buffer->input[1] = bd->props.brightness;
940 1018
941 if (buffer->input[0] == -1) {
942 ret = -ENODEV;
943 goto out;
944 }
945
946 if (power_supply_is_system_supplied() > 0) 1019 if (power_supply_is_system_supplied() > 0)
947 dell_send_request(buffer, 1, 2); 1020 dell_send_request(buffer, 1, 2);
948 else 1021 else
949 dell_send_request(buffer, 1, 1); 1022 dell_send_request(buffer, 1, 1);
950 1023
951 out: 1024 ret = dell_smi_error(buffer->output[0]);
1025
952 release_buffer(); 1026 release_buffer();
953 return ret; 1027 return ret;
954} 1028}
955 1029
956static int dell_get_intensity(struct backlight_device *bd) 1030static int dell_get_intensity(struct backlight_device *bd)
957{ 1031{
958 int ret = 0; 1032 int token;
1033 int ret;
959 1034
960 get_buffer(); 1035 token = find_token_location(BRIGHTNESS_TOKEN);
961 buffer->input[0] = find_token_location(BRIGHTNESS_TOKEN); 1036 if (token == -1)
1037 return -ENODEV;
962 1038
963 if (buffer->input[0] == -1) { 1039 get_buffer();
964 ret = -ENODEV; 1040 buffer->input[0] = token;
965 goto out;
966 }
967 1041
968 if (power_supply_is_system_supplied() > 0) 1042 if (power_supply_is_system_supplied() > 0)
969 dell_send_request(buffer, 0, 2); 1043 dell_send_request(buffer, 0, 2);
970 else 1044 else
971 dell_send_request(buffer, 0, 1); 1045 dell_send_request(buffer, 0, 1);
972 1046
973 ret = buffer->output[1]; 1047 if (buffer->output[0])
1048 ret = dell_smi_error(buffer->output[0]);
1049 else
1050 ret = buffer->output[1];
974 1051
975 out:
976 release_buffer(); 1052 release_buffer();
977 return ret; 1053 return ret;
978} 1054}
@@ -2036,6 +2112,7 @@ static void kbd_led_exit(void)
2036static int __init dell_init(void) 2112static int __init dell_init(void)
2037{ 2113{
2038 int max_intensity = 0; 2114 int max_intensity = 0;
2115 int token;
2039 int ret; 2116 int ret;
2040 2117
2041 if (!dmi_check_system(dell_device_table)) 2118 if (!dmi_check_system(dell_device_table))
@@ -2094,13 +2171,15 @@ static int __init dell_init(void)
2094 if (acpi_video_get_backlight_type() != acpi_backlight_vendor) 2171 if (acpi_video_get_backlight_type() != acpi_backlight_vendor)
2095 return 0; 2172 return 0;
2096 2173
2097 get_buffer(); 2174 token = find_token_location(BRIGHTNESS_TOKEN);
2098 buffer->input[0] = find_token_location(BRIGHTNESS_TOKEN); 2175 if (token != -1) {
2099 if (buffer->input[0] != -1) { 2176 get_buffer();
2177 buffer->input[0] = token;
2100 dell_send_request(buffer, 0, 2); 2178 dell_send_request(buffer, 0, 2);
2101 max_intensity = buffer->output[3]; 2179 if (buffer->output[0] == 0)
2180 max_intensity = buffer->output[3];
2181 release_buffer();
2102 } 2182 }
2103 release_buffer();
2104 2183
2105 if (max_intensity) { 2184 if (max_intensity) {
2106 struct backlight_properties props; 2185 struct backlight_properties props;
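
The dell-laptop hunks above drop the cached hwswitch_state and instead query both the wireless status word and the hardware-switch word on every rfkill operation. A minimal sketch of that two-step SMBIOS query, assuming the driver's own static helpers (get_buffer(), clear_buffer(), dell_send_request(), release_buffer(), dell_smi_error()) and its global calling_interface_buffer exactly as they appear in the hunks:

/*
 * Sketch only -- not part of the patch.  The helpers and the global
 * "buffer" are dell-laptop.c internals shown in the hunks above.
 */
static int dell_query_status_and_hwswitch(int *status, int *hwswitch)
{
	int ret;

	get_buffer();

	/* Class 17, select 11: wireless status word lands in output[1]. */
	dell_send_request(buffer, 17, 11);
	ret = buffer->output[0];
	*status = buffer->output[1];
	if (ret != 0)
		goto out;

	/* Same class/select with input[0] == 0x2: hardware-switch word. */
	clear_buffer();
	buffer->input[0] = 0x2;
	dell_send_request(buffer, 17, 11);
	ret = buffer->output[0];
	*hwswitch = buffer->output[1];

out:
	release_buffer();
	return dell_smi_error(ret);
}

Per the debugfs output added above, bit 0 of the status word reports whether a hardware switch exists at all, while the per-radio bits of the hwswitch word say which radios that switch controls.
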
diff --git a/drivers/platform/x86/intel_pmc_ipc.c b/drivers/platform/x86/intel_pmc_ipc.c
index d734763dab69..105cfffe82c6 100644
--- a/drivers/platform/x86/intel_pmc_ipc.c
+++ b/drivers/platform/x86/intel_pmc_ipc.c
@@ -96,18 +96,18 @@ static struct intel_pmc_ipc_dev {
96 struct completion cmd_complete; 96 struct completion cmd_complete;
97 97
98 /* The following PMC BARs share the same ACPI device with the IPC */ 98 /* The following PMC BARs share the same ACPI device with the IPC */
99 void *acpi_io_base; 99 resource_size_t acpi_io_base;
100 int acpi_io_size; 100 int acpi_io_size;
101 struct platform_device *tco_dev; 101 struct platform_device *tco_dev;
102 102
103 /* gcr */ 103 /* gcr */
104 void *gcr_base; 104 resource_size_t gcr_base;
105 int gcr_size; 105 int gcr_size;
106 106
107 /* punit */ 107 /* punit */
108 void *punit_base; 108 resource_size_t punit_base;
109 int punit_size; 109 int punit_size;
110 void *punit_base2; 110 resource_size_t punit_base2;
111 int punit_size2; 111 int punit_size2;
112 struct platform_device *punit_dev; 112 struct platform_device *punit_dev;
113} ipcdev; 113} ipcdev;
@@ -210,10 +210,15 @@ static int intel_pmc_ipc_check_status(void)
210 return ret; 210 return ret;
211} 211}
212 212
213/* 213/**
214 * intel_pmc_ipc_simple_command 214 * intel_pmc_ipc_simple_command() - Simple IPC command
215 * @cmd: command 215 * @cmd: IPC command code.
216 * @sub: sub type 216 * @sub: IPC command sub type.
217 *
218 * Send a simple IPC command to PMC when don't need to specify
219 * input/output data and source/dest pointers.
220 *
221 * Return: an IPC error code or 0 on success.
217 */ 222 */
218int intel_pmc_ipc_simple_command(int cmd, int sub) 223int intel_pmc_ipc_simple_command(int cmd, int sub)
219{ 224{
@@ -232,16 +237,20 @@ int intel_pmc_ipc_simple_command(int cmd, int sub)
232} 237}
233EXPORT_SYMBOL_GPL(intel_pmc_ipc_simple_command); 238EXPORT_SYMBOL_GPL(intel_pmc_ipc_simple_command);
234 239
235/* 240/**
236 * intel_pmc_ipc_raw_cmd 241 * intel_pmc_ipc_raw_cmd() - IPC command with data and pointers
237 * @cmd: command 242 * @cmd: IPC command code.
238 * @sub: sub type 243 * @sub: IPC command sub type.
239 * @in: input data 244 * @in: input data of this IPC command.
240 * @inlen: input length in bytes 245 * @inlen: input data length in bytes.
241 * @out: output data 246 * @out: output data of this IPC command.
242 * @outlen: output length in dwords 247 * @outlen: output data length in dwords.
243 * @sptr: data writing to SPTR register 248 * @sptr: data writing to SPTR register.
244 * @dptr: data writing to DPTR register 249 * @dptr: data writing to DPTR register.
250 *
251 * Send an IPC command to PMC with input/output data and source/dest pointers.
252 *
253 * Return: an IPC error code or 0 on success.
245 */ 254 */
246int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen, u32 *out, 255int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen, u32 *out,
247 u32 outlen, u32 dptr, u32 sptr) 256 u32 outlen, u32 dptr, u32 sptr)
@@ -278,14 +287,18 @@ int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen, u32 *out,
278} 287}
279EXPORT_SYMBOL_GPL(intel_pmc_ipc_raw_cmd); 288EXPORT_SYMBOL_GPL(intel_pmc_ipc_raw_cmd);
280 289
281/* 290/**
282 * intel_pmc_ipc_command 291 * intel_pmc_ipc_command() - IPC command with input/output data
283 * @cmd: command 292 * @cmd: IPC command code.
284 * @sub: sub type 293 * @sub: IPC command sub type.
285 * @in: input data 294 * @in: input data of this IPC command.
286 * @inlen: input length in bytes 295 * @inlen: input data length in bytes.
287 * @out: output data 296 * @out: output data of this IPC command.
288 * @outlen: output length in dwords 297 * @outlen: output data length in dwords.
298 *
299 * Send an IPC command to PMC with input/output data.
300 *
301 * Return: an IPC error code or 0 on success.
289 */ 302 */
290int intel_pmc_ipc_command(u32 cmd, u32 sub, u8 *in, u32 inlen, 303int intel_pmc_ipc_command(u32 cmd, u32 sub, u8 *in, u32 inlen,
291 u32 *out, u32 outlen) 304 u32 *out, u32 outlen)
@@ -480,11 +493,11 @@ static int ipc_create_punit_device(void)
480 pdev->dev.parent = ipcdev.dev; 493 pdev->dev.parent = ipcdev.dev;
481 494
482 res = punit_res; 495 res = punit_res;
483 res->start = (resource_size_t)ipcdev.punit_base; 496 res->start = ipcdev.punit_base;
484 res->end = res->start + ipcdev.punit_size - 1; 497 res->end = res->start + ipcdev.punit_size - 1;
485 498
486 res = punit_res + PUNIT_RESOURCE_INTER; 499 res = punit_res + PUNIT_RESOURCE_INTER;
487 res->start = (resource_size_t)ipcdev.punit_base2; 500 res->start = ipcdev.punit_base2;
488 res->end = res->start + ipcdev.punit_size2 - 1; 501 res->end = res->start + ipcdev.punit_size2 - 1;
489 502
490 ret = platform_device_add_resources(pdev, punit_res, 503 ret = platform_device_add_resources(pdev, punit_res,
@@ -522,15 +535,15 @@ static int ipc_create_tco_device(void)
522 pdev->dev.parent = ipcdev.dev; 535 pdev->dev.parent = ipcdev.dev;
523 536
524 res = tco_res + TCO_RESOURCE_ACPI_IO; 537 res = tco_res + TCO_RESOURCE_ACPI_IO;
525 res->start = (resource_size_t)ipcdev.acpi_io_base + TCO_BASE_OFFSET; 538 res->start = ipcdev.acpi_io_base + TCO_BASE_OFFSET;
526 res->end = res->start + TCO_REGS_SIZE - 1; 539 res->end = res->start + TCO_REGS_SIZE - 1;
527 540
528 res = tco_res + TCO_RESOURCE_SMI_EN_IO; 541 res = tco_res + TCO_RESOURCE_SMI_EN_IO;
529 res->start = (resource_size_t)ipcdev.acpi_io_base + SMI_EN_OFFSET; 542 res->start = ipcdev.acpi_io_base + SMI_EN_OFFSET;
530 res->end = res->start + SMI_EN_SIZE - 1; 543 res->end = res->start + SMI_EN_SIZE - 1;
531 544
532 res = tco_res + TCO_RESOURCE_GCR_MEM; 545 res = tco_res + TCO_RESOURCE_GCR_MEM;
533 res->start = (resource_size_t)ipcdev.gcr_base; 546 res->start = ipcdev.gcr_base;
534 res->end = res->start + ipcdev.gcr_size - 1; 547 res->end = res->start + ipcdev.gcr_size - 1;
535 548
536 ret = platform_device_add_resources(pdev, tco_res, ARRAY_SIZE(tco_res)); 549 ret = platform_device_add_resources(pdev, tco_res, ARRAY_SIZE(tco_res));
@@ -589,7 +602,7 @@ static int ipc_plat_get_res(struct platform_device *pdev)
589 return -ENXIO; 602 return -ENXIO;
590 } 603 }
591 size = resource_size(res); 604 size = resource_size(res);
592 ipcdev.acpi_io_base = (void *)res->start; 605 ipcdev.acpi_io_base = res->start;
593 ipcdev.acpi_io_size = size; 606 ipcdev.acpi_io_size = size;
594 dev_info(&pdev->dev, "io res: %llx %x\n", 607 dev_info(&pdev->dev, "io res: %llx %x\n",
595 (long long)res->start, (int)resource_size(res)); 608 (long long)res->start, (int)resource_size(res));
@@ -601,7 +614,7 @@ static int ipc_plat_get_res(struct platform_device *pdev)
601 return -ENXIO; 614 return -ENXIO;
602 } 615 }
603 size = resource_size(res); 616 size = resource_size(res);
604 ipcdev.punit_base = (void *)res->start; 617 ipcdev.punit_base = res->start;
605 ipcdev.punit_size = size; 618 ipcdev.punit_size = size;
606 dev_info(&pdev->dev, "punit data res: %llx %x\n", 619 dev_info(&pdev->dev, "punit data res: %llx %x\n",
607 (long long)res->start, (int)resource_size(res)); 620 (long long)res->start, (int)resource_size(res));
@@ -613,7 +626,7 @@ static int ipc_plat_get_res(struct platform_device *pdev)
613 return -ENXIO; 626 return -ENXIO;
614 } 627 }
615 size = resource_size(res); 628 size = resource_size(res);
616 ipcdev.punit_base2 = (void *)res->start; 629 ipcdev.punit_base2 = res->start;
617 ipcdev.punit_size2 = size; 630 ipcdev.punit_size2 = size;
618 dev_info(&pdev->dev, "punit interface res: %llx %x\n", 631 dev_info(&pdev->dev, "punit interface res: %llx %x\n",
619 (long long)res->start, (int)resource_size(res)); 632 (long long)res->start, (int)resource_size(res));
@@ -637,7 +650,7 @@ static int ipc_plat_get_res(struct platform_device *pdev)
637 } 650 }
638 ipcdev.ipc_base = addr; 651 ipcdev.ipc_base = addr;
639 652
640 ipcdev.gcr_base = (void *)(res->start + size); 653 ipcdev.gcr_base = res->start + size;
641 ipcdev.gcr_size = PLAT_RESOURCE_GCR_SIZE; 654 ipcdev.gcr_size = PLAT_RESOURCE_GCR_SIZE;
642 dev_info(&pdev->dev, "ipc res: %llx %x\n", 655 dev_info(&pdev->dev, "ipc res: %llx %x\n",
643 (long long)res->start, (int)resource_size(res)); 656 (long long)res->start, (int)resource_size(res));
diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c
index 001b199a8c33..187d1086d15c 100644
--- a/drivers/platform/x86/intel_scu_ipc.c
+++ b/drivers/platform/x86/intel_scu_ipc.c
@@ -216,13 +216,13 @@ static int pwr_reg_rdwr(u16 *addr, u8 *data, u32 count, u32 op, u32 id)
216 int nc; 216 int nc;
217 u32 offset = 0; 217 u32 offset = 0;
218 int err; 218 int err;
219 u8 cbuf[IPC_WWBUF_SIZE] = { }; 219 u8 cbuf[IPC_WWBUF_SIZE];
220 u32 *wbuf = (u32 *)&cbuf; 220 u32 *wbuf = (u32 *)&cbuf;
221 221
222 mutex_lock(&ipclock);
223
224 memset(cbuf, 0, sizeof(cbuf)); 222 memset(cbuf, 0, sizeof(cbuf));
225 223
224 mutex_lock(&ipclock);
225
226 if (ipcdev.pdev == NULL) { 226 if (ipcdev.pdev == NULL) {
227 mutex_unlock(&ipclock); 227 mutex_unlock(&ipclock);
228 return -ENODEV; 228 return -ENODEV;
diff --git a/drivers/power/max77693_charger.c b/drivers/power/max77693_charger.c
index 754879eb59f6..060cab5ae3aa 100644
--- a/drivers/power/max77693_charger.c
+++ b/drivers/power/max77693_charger.c
@@ -20,6 +20,7 @@
20#include <linux/power_supply.h> 20#include <linux/power_supply.h>
21#include <linux/regmap.h> 21#include <linux/regmap.h>
22#include <linux/mfd/max77693.h> 22#include <linux/mfd/max77693.h>
23#include <linux/mfd/max77693-common.h>
23#include <linux/mfd/max77693-private.h> 24#include <linux/mfd/max77693-private.h>
24 25
25#define MAX77693_CHARGER_NAME "max77693-charger" 26#define MAX77693_CHARGER_NAME "max77693-charger"
diff --git a/drivers/regulator/88pm800.c b/drivers/regulator/88pm800.c
index 832932bdc977..a62a89674fb5 100644
--- a/drivers/regulator/88pm800.c
+++ b/drivers/regulator/88pm800.c
@@ -78,7 +78,6 @@ struct pm800_regulator_info {
78}; 78};
79 79
80struct pm800_regulators { 80struct pm800_regulators {
81 struct regulator_dev *regulators[PM800_ID_RG_MAX];
82 struct pm80x_chip *chip; 81 struct pm80x_chip *chip;
83 struct regmap *map; 82 struct regmap *map;
84}; 83};
@@ -92,14 +91,16 @@ struct pm800_regulators {
92 * not the constant voltage table. 91 * not the constant voltage table.
93 * n_volt - Number of available selectors 92 * n_volt - Number of available selectors
94 */ 93 */
95#define PM800_BUCK(vreg, ereg, ebit, amax, volt_ranges, n_volt) \ 94#define PM800_BUCK(match, vreg, ereg, ebit, amax, volt_ranges, n_volt) \
96{ \ 95{ \
97 .desc = { \ 96 .desc = { \
98 .name = #vreg, \ 97 .name = #vreg, \
99 .ops = &pm800_volt_range_ops, \ 98 .of_match = of_match_ptr(#match), \
100 .type = REGULATOR_VOLTAGE, \ 99 .regulators_node = of_match_ptr("regulators"), \
101 .id = PM800_ID_##vreg, \ 100 .ops = &pm800_volt_range_ops, \
102 .owner = THIS_MODULE, \ 101 .type = REGULATOR_VOLTAGE, \
102 .id = PM800_ID_##vreg, \
103 .owner = THIS_MODULE, \
103 .n_voltages = n_volt, \ 104 .n_voltages = n_volt, \
104 .linear_ranges = volt_ranges, \ 105 .linear_ranges = volt_ranges, \
105 .n_linear_ranges = ARRAY_SIZE(volt_ranges), \ 106 .n_linear_ranges = ARRAY_SIZE(volt_ranges), \
@@ -108,7 +109,7 @@ struct pm800_regulators {
108 .enable_reg = PM800_##ereg, \ 109 .enable_reg = PM800_##ereg, \
109 .enable_mask = 1 << (ebit), \ 110 .enable_mask = 1 << (ebit), \
110 }, \ 111 }, \
111 .max_ua = (amax), \ 112 .max_ua = (amax), \
112} 113}
113 114
114/* 115/*
@@ -120,22 +121,24 @@ struct pm800_regulators {
120 * For all the LDOes, there are too many ranges. Using volt_table will be 121 * For all the LDOes, there are too many ranges. Using volt_table will be
121 * simpler and faster. 122 * simpler and faster.
122 */ 123 */
123#define PM800_LDO(vreg, ereg, ebit, amax, ldo_volt_table) \ 124#define PM800_LDO(match, vreg, ereg, ebit, amax, ldo_volt_table) \
124{ \ 125{ \
125 .desc = { \ 126 .desc = { \
126 .name = #vreg, \ 127 .name = #vreg, \
127 .ops = &pm800_volt_table_ops, \ 128 .of_match = of_match_ptr(#match), \
128 .type = REGULATOR_VOLTAGE, \ 129 .regulators_node = of_match_ptr("regulators"), \
129 .id = PM800_ID_##vreg, \ 130 .ops = &pm800_volt_table_ops, \
130 .owner = THIS_MODULE, \ 131 .type = REGULATOR_VOLTAGE, \
131 .n_voltages = ARRAY_SIZE(ldo_volt_table), \ 132 .id = PM800_ID_##vreg, \
132 .vsel_reg = PM800_##vreg##_VOUT, \ 133 .owner = THIS_MODULE, \
133 .vsel_mask = 0x1f, \ 134 .n_voltages = ARRAY_SIZE(ldo_volt_table), \
134 .enable_reg = PM800_##ereg, \ 135 .vsel_reg = PM800_##vreg##_VOUT, \
135 .enable_mask = 1 << (ebit), \ 136 .vsel_mask = 0xf, \
136 .volt_table = ldo_volt_table, \ 137 .enable_reg = PM800_##ereg, \
138 .enable_mask = 1 << (ebit), \
139 .volt_table = ldo_volt_table, \
137 }, \ 140 }, \
138 .max_ua = (amax), \ 141 .max_ua = (amax), \
139} 142}
140 143
141/* Ranges are sorted in ascending order. */ 144/* Ranges are sorted in ascending order. */
@@ -178,122 +181,66 @@ static int pm800_get_current_limit(struct regulator_dev *rdev)
178} 181}
179 182
180static struct regulator_ops pm800_volt_range_ops = { 183static struct regulator_ops pm800_volt_range_ops = {
181 .list_voltage = regulator_list_voltage_linear_range, 184 .list_voltage = regulator_list_voltage_linear_range,
182 .map_voltage = regulator_map_voltage_linear_range, 185 .map_voltage = regulator_map_voltage_linear_range,
183 .set_voltage_sel = regulator_set_voltage_sel_regmap, 186 .set_voltage_sel = regulator_set_voltage_sel_regmap,
184 .get_voltage_sel = regulator_get_voltage_sel_regmap, 187 .get_voltage_sel = regulator_get_voltage_sel_regmap,
185 .enable = regulator_enable_regmap, 188 .enable = regulator_enable_regmap,
186 .disable = regulator_disable_regmap, 189 .disable = regulator_disable_regmap,
187 .is_enabled = regulator_is_enabled_regmap, 190 .is_enabled = regulator_is_enabled_regmap,
188 .get_current_limit = pm800_get_current_limit, 191 .get_current_limit = pm800_get_current_limit,
189}; 192};
190 193
191static struct regulator_ops pm800_volt_table_ops = { 194static struct regulator_ops pm800_volt_table_ops = {
192 .list_voltage = regulator_list_voltage_table, 195 .list_voltage = regulator_list_voltage_table,
193 .map_voltage = regulator_map_voltage_iterate, 196 .map_voltage = regulator_map_voltage_iterate,
194 .set_voltage_sel = regulator_set_voltage_sel_regmap, 197 .set_voltage_sel = regulator_set_voltage_sel_regmap,
195 .get_voltage_sel = regulator_get_voltage_sel_regmap, 198 .get_voltage_sel = regulator_get_voltage_sel_regmap,
196 .enable = regulator_enable_regmap, 199 .enable = regulator_enable_regmap,
197 .disable = regulator_disable_regmap, 200 .disable = regulator_disable_regmap,
198 .is_enabled = regulator_is_enabled_regmap, 201 .is_enabled = regulator_is_enabled_regmap,
199 .get_current_limit = pm800_get_current_limit, 202 .get_current_limit = pm800_get_current_limit,
200}; 203};
201 204
202/* The array is indexed by id(PM800_ID_XXX) */ 205/* The array is indexed by id(PM800_ID_XXX) */
203static struct pm800_regulator_info pm800_regulator_info[] = { 206static struct pm800_regulator_info pm800_regulator_info[] = {
204 PM800_BUCK(BUCK1, BUCK_ENA, 0, 3000000, buck1_volt_range, 0x55), 207 PM800_BUCK(buck1, BUCK1, BUCK_ENA, 0, 3000000, buck1_volt_range, 0x55),
205 PM800_BUCK(BUCK2, BUCK_ENA, 1, 1200000, buck2_5_volt_range, 0x73), 208 PM800_BUCK(buck2, BUCK2, BUCK_ENA, 1, 1200000, buck2_5_volt_range, 0x73),
206 PM800_BUCK(BUCK3, BUCK_ENA, 2, 1200000, buck2_5_volt_range, 0x73), 209 PM800_BUCK(buck3, BUCK3, BUCK_ENA, 2, 1200000, buck2_5_volt_range, 0x73),
207 PM800_BUCK(BUCK4, BUCK_ENA, 3, 1200000, buck2_5_volt_range, 0x73), 210 PM800_BUCK(buck4, BUCK4, BUCK_ENA, 3, 1200000, buck2_5_volt_range, 0x73),
208 PM800_BUCK(BUCK5, BUCK_ENA, 4, 1200000, buck2_5_volt_range, 0x73), 211 PM800_BUCK(buck5, BUCK5, BUCK_ENA, 4, 1200000, buck2_5_volt_range, 0x73),
209 212
210 PM800_LDO(LDO1, LDO_ENA1_1, 0, 200000, ldo1_volt_table), 213 PM800_LDO(ldo1, LDO1, LDO_ENA1_1, 0, 200000, ldo1_volt_table),
211 PM800_LDO(LDO2, LDO_ENA1_1, 1, 10000, ldo2_volt_table), 214 PM800_LDO(ldo2, LDO2, LDO_ENA1_1, 1, 10000, ldo2_volt_table),
212 PM800_LDO(LDO3, LDO_ENA1_1, 2, 300000, ldo3_17_volt_table), 215 PM800_LDO(ldo3, LDO3, LDO_ENA1_1, 2, 300000, ldo3_17_volt_table),
213 PM800_LDO(LDO4, LDO_ENA1_1, 3, 300000, ldo3_17_volt_table), 216 PM800_LDO(ldo4, LDO4, LDO_ENA1_1, 3, 300000, ldo3_17_volt_table),
214 PM800_LDO(LDO5, LDO_ENA1_1, 4, 300000, ldo3_17_volt_table), 217 PM800_LDO(ldo5, LDO5, LDO_ENA1_1, 4, 300000, ldo3_17_volt_table),
215 PM800_LDO(LDO6, LDO_ENA1_1, 5, 300000, ldo3_17_volt_table), 218 PM800_LDO(ldo6, LDO6, LDO_ENA1_1, 5, 300000, ldo3_17_volt_table),
216 PM800_LDO(LDO7, LDO_ENA1_1, 6, 300000, ldo3_17_volt_table), 219 PM800_LDO(ldo7, LDO7, LDO_ENA1_1, 6, 300000, ldo3_17_volt_table),
217 PM800_LDO(LDO8, LDO_ENA1_1, 7, 300000, ldo3_17_volt_table), 220 PM800_LDO(ldo8, LDO8, LDO_ENA1_1, 7, 300000, ldo3_17_volt_table),
218 PM800_LDO(LDO9, LDO_ENA1_2, 0, 300000, ldo3_17_volt_table), 221 PM800_LDO(ldo9, LDO9, LDO_ENA1_2, 0, 300000, ldo3_17_volt_table),
219 PM800_LDO(LDO10, LDO_ENA1_2, 1, 300000, ldo3_17_volt_table), 222 PM800_LDO(ldo10, LDO10, LDO_ENA1_2, 1, 300000, ldo3_17_volt_table),
220 PM800_LDO(LDO11, LDO_ENA1_2, 2, 300000, ldo3_17_volt_table), 223 PM800_LDO(ldo11, LDO11, LDO_ENA1_2, 2, 300000, ldo3_17_volt_table),
221 PM800_LDO(LDO12, LDO_ENA1_2, 3, 300000, ldo3_17_volt_table), 224 PM800_LDO(ldo12, LDO12, LDO_ENA1_2, 3, 300000, ldo3_17_volt_table),
222 PM800_LDO(LDO13, LDO_ENA1_2, 4, 300000, ldo3_17_volt_table), 225 PM800_LDO(ldo13, LDO13, LDO_ENA1_2, 4, 300000, ldo3_17_volt_table),
223 PM800_LDO(LDO14, LDO_ENA1_2, 5, 300000, ldo3_17_volt_table), 226 PM800_LDO(ldo14, LDO14, LDO_ENA1_2, 5, 300000, ldo3_17_volt_table),
224 PM800_LDO(LDO15, LDO_ENA1_2, 6, 300000, ldo3_17_volt_table), 227 PM800_LDO(ldo15, LDO15, LDO_ENA1_2, 6, 300000, ldo3_17_volt_table),
225 PM800_LDO(LDO16, LDO_ENA1_2, 7, 300000, ldo3_17_volt_table), 228 PM800_LDO(ldo16, LDO16, LDO_ENA1_2, 7, 300000, ldo3_17_volt_table),
226 PM800_LDO(LDO17, LDO_ENA1_3, 0, 300000, ldo3_17_volt_table), 229 PM800_LDO(ldo17, LDO17, LDO_ENA1_3, 0, 300000, ldo3_17_volt_table),
227 PM800_LDO(LDO18, LDO_ENA1_3, 1, 200000, ldo18_19_volt_table), 230 PM800_LDO(ldo18, LDO18, LDO_ENA1_3, 1, 200000, ldo18_19_volt_table),
228 PM800_LDO(LDO19, LDO_ENA1_3, 2, 200000, ldo18_19_volt_table), 231 PM800_LDO(ldo19, LDO19, LDO_ENA1_3, 2, 200000, ldo18_19_volt_table),
229};
230
231#define PM800_REGULATOR_OF_MATCH(_name, _id) \
232 [PM800_ID_##_id] = { \
233 .name = #_name, \
234 .driver_data = &pm800_regulator_info[PM800_ID_##_id], \
235 }
236
237static struct of_regulator_match pm800_regulator_matches[] = {
238 PM800_REGULATOR_OF_MATCH(buck1, BUCK1),
239 PM800_REGULATOR_OF_MATCH(buck2, BUCK2),
240 PM800_REGULATOR_OF_MATCH(buck3, BUCK3),
241 PM800_REGULATOR_OF_MATCH(buck4, BUCK4),
242 PM800_REGULATOR_OF_MATCH(buck5, BUCK5),
243 PM800_REGULATOR_OF_MATCH(ldo1, LDO1),
244 PM800_REGULATOR_OF_MATCH(ldo2, LDO2),
245 PM800_REGULATOR_OF_MATCH(ldo3, LDO3),
246 PM800_REGULATOR_OF_MATCH(ldo4, LDO4),
247 PM800_REGULATOR_OF_MATCH(ldo5, LDO5),
248 PM800_REGULATOR_OF_MATCH(ldo6, LDO6),
249 PM800_REGULATOR_OF_MATCH(ldo7, LDO7),
250 PM800_REGULATOR_OF_MATCH(ldo8, LDO8),
251 PM800_REGULATOR_OF_MATCH(ldo9, LDO9),
252 PM800_REGULATOR_OF_MATCH(ldo10, LDO10),
253 PM800_REGULATOR_OF_MATCH(ldo11, LDO11),
254 PM800_REGULATOR_OF_MATCH(ldo12, LDO12),
255 PM800_REGULATOR_OF_MATCH(ldo13, LDO13),
256 PM800_REGULATOR_OF_MATCH(ldo14, LDO14),
257 PM800_REGULATOR_OF_MATCH(ldo15, LDO15),
258 PM800_REGULATOR_OF_MATCH(ldo16, LDO16),
259 PM800_REGULATOR_OF_MATCH(ldo17, LDO17),
260 PM800_REGULATOR_OF_MATCH(ldo18, LDO18),
261 PM800_REGULATOR_OF_MATCH(ldo19, LDO19),
262}; 232};
263 233
264static int pm800_regulator_dt_init(struct platform_device *pdev)
265{
266 struct device_node *np = pdev->dev.of_node;
267 int ret;
268
269 ret = of_regulator_match(&pdev->dev, np,
270 pm800_regulator_matches,
271 ARRAY_SIZE(pm800_regulator_matches));
272 if (ret < 0)
273 return ret;
274
275 return 0;
276}
277
278static int pm800_regulator_probe(struct platform_device *pdev) 234static int pm800_regulator_probe(struct platform_device *pdev)
279{ 235{
280 struct pm80x_chip *chip = dev_get_drvdata(pdev->dev.parent); 236 struct pm80x_chip *chip = dev_get_drvdata(pdev->dev.parent);
281 struct pm80x_platform_data *pdata = dev_get_platdata(pdev->dev.parent); 237 struct pm80x_platform_data *pdata = dev_get_platdata(pdev->dev.parent);
282 struct pm800_regulators *pm800_data; 238 struct pm800_regulators *pm800_data;
283 struct pm800_regulator_info *info;
284 struct regulator_config config = { }; 239 struct regulator_config config = { };
285 struct regulator_init_data *init_data; 240 struct regulator_init_data *init_data;
286 int i, ret; 241 int i, ret;
287 242
288 if (!pdata || pdata->num_regulators == 0) { 243 if (pdata && pdata->num_regulators) {
289 if (IS_ENABLED(CONFIG_OF)) {
290 ret = pm800_regulator_dt_init(pdev);
291 if (ret)
292 return ret;
293 } else {
294 return -ENODEV;
295 }
296 } else if (pdata->num_regulators) {
297 unsigned int count = 0; 244 unsigned int count = 0;
298 245
299 /* Check whether num_regulator is valid. */ 246 /* Check whether num_regulator is valid. */
@@ -303,8 +250,6 @@ static int pm800_regulator_probe(struct platform_device *pdev)
303 } 250 }
304 if (count != pdata->num_regulators) 251 if (count != pdata->num_regulators)
305 return -EINVAL; 252 return -EINVAL;
306 } else {
307 return -EINVAL;
308 } 253 }
309 254
310 pm800_data = devm_kzalloc(&pdev->dev, sizeof(*pm800_data), 255 pm800_data = devm_kzalloc(&pdev->dev, sizeof(*pm800_data),
@@ -317,30 +262,27 @@ static int pm800_regulator_probe(struct platform_device *pdev)
317 262
318 platform_set_drvdata(pdev, pm800_data); 263 platform_set_drvdata(pdev, pm800_data);
319 264
265 config.dev = chip->dev;
266 config.regmap = pm800_data->map;
320 for (i = 0; i < PM800_ID_RG_MAX; i++) { 267 for (i = 0; i < PM800_ID_RG_MAX; i++) {
321 if (!pdata || pdata->num_regulators == 0) 268 struct regulator_dev *regulator;
322 init_data = pm800_regulator_matches[i].init_data; 269
323 else 270 if (pdata && pdata->num_regulators) {
324 init_data = pdata->regulators[i]; 271 init_data = pdata->regulators[i];
325 if (!init_data) 272 if (!init_data)
326 continue; 273 continue;
327 info = pm800_regulator_matches[i].driver_data;
328 config.dev = &pdev->dev;
329 config.init_data = init_data;
330 config.driver_data = info;
331 config.regmap = pm800_data->map;
332 config.of_node = pm800_regulator_matches[i].of_node;
333
334 pm800_data->regulators[i] =
335 regulator_register(&info->desc, &config);
336 if (IS_ERR(pm800_data->regulators[i])) {
337 ret = PTR_ERR(pm800_data->regulators[i]);
338 dev_err(&pdev->dev, "Failed to register %s\n",
339 info->desc.name);
340 274
341 while (--i >= 0) 275 config.init_data = init_data;
342 regulator_unregister(pm800_data->regulators[i]); 276 }
277
278 config.driver_data = &pm800_regulator_info[i];
343 279
280 regulator = devm_regulator_register(&pdev->dev,
281 &pm800_regulator_info[i].desc, &config);
282 if (IS_ERR(regulator)) {
283 ret = PTR_ERR(regulator);
284 dev_err(&pdev->dev, "Failed to register %s\n",
285 pm800_regulator_info[i].desc.name);
344 return ret; 286 return ret;
345 } 287 }
346 } 288 }
@@ -348,23 +290,11 @@ static int pm800_regulator_probe(struct platform_device *pdev)
348 return 0; 290 return 0;
349} 291}
350 292
351static int pm800_regulator_remove(struct platform_device *pdev)
352{
353 struct pm800_regulators *pm800_data = platform_get_drvdata(pdev);
354 int i;
355
356 for (i = 0; i < PM800_ID_RG_MAX; i++)
357 regulator_unregister(pm800_data->regulators[i]);
358
359 return 0;
360}
361
362static struct platform_driver pm800_regulator_driver = { 293static struct platform_driver pm800_regulator_driver = {
363 .driver = { 294 .driver = {
364 .name = "88pm80x-regulator", 295 .name = "88pm80x-regulator",
365 }, 296 },
366 .probe = pm800_regulator_probe, 297 .probe = pm800_regulator_probe,
367 .remove = pm800_regulator_remove,
368}; 298};
369 299
370module_platform_driver(pm800_regulator_driver); 300module_platform_driver(pm800_regulator_driver);
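
The 88pm800 conversion above leans on the regulator core's .of_match/.regulators_node matching plus devm_regulator_register(), which is what lets the driver delete its of_regulator_match() table, the init_data/of_node plumbing, and the whole unregister path. A minimal sketch of that pattern for a hypothetical single-buck PMIC (all names, ranges, and register offsets are placeholders, not taken from the patch):

/* Hypothetical example of the .of_match/.regulators_node pattern: the core
 * walks the "regulators" DT subnode of config.dev and matches children
 * against .of_match, so no of_regulator_match() is needed. */
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>

static const struct regulator_ops example_buck_ops = {
	.enable		 = regulator_enable_regmap,
	.disable	 = regulator_disable_regmap,
	.is_enabled	 = regulator_is_enabled_regmap,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.list_voltage	 = regulator_list_voltage_linear,
};

static const struct regulator_desc example_buck_desc = {
	.name		 = "EXAMPLE_BUCK1",
	.of_match	 = of_match_ptr("buck1"),
	.regulators_node = of_match_ptr("regulators"),
	.type		 = REGULATOR_VOLTAGE,
	.owner		 = THIS_MODULE,
	.ops		 = &example_buck_ops,
	.n_voltages	 = 64,
	.min_uV		 = 600000,	/* placeholder linear range */
	.uV_step	 = 12500,
	.vsel_reg	 = 0x10,	/* placeholder register offsets */
	.vsel_mask	 = 0x3f,
	.enable_reg	 = 0x12,
	.enable_mask	 = 0x01,
};

static int example_regulator_probe(struct platform_device *pdev)
{
	struct regulator_config config = { };
	struct regulator_dev *rdev;

	/* The MFD parent owns the DT node with the "regulators" subnode. */
	config.dev = pdev->dev.parent;
	config.regmap = dev_get_regmap(pdev->dev.parent, NULL);

	rdev = devm_regulator_register(&pdev->dev, &example_buck_desc,
				       &config);
	return PTR_ERR_OR_ZERO(rdev);
}
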
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index bef3bde6971b..64bccff557be 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -209,13 +209,13 @@ config REGULATOR_DA9210
209 interface. 209 interface.
210 210
211config REGULATOR_DA9211 211config REGULATOR_DA9211
212 tristate "Dialog Semiconductor DA9211/DA9212/DA9213/DA9214 regulator" 212 tristate "Dialog Semiconductor DA9211/DA9212/DA9213/DA9214/DA9215 regulator"
213 depends on I2C 213 depends on I2C
214 select REGMAP_I2C 214 select REGMAP_I2C
215 help 215 help
216 Say y here to support for the Dialog Semiconductor DA9211/DA9212 216 Say y here to support for the Dialog Semiconductor DA9211/DA9212
217 /DA9213/DA9214. 217 /DA9213/DA9214/DA9215.
218 The DA9211/DA9212/DA9213/DA9214 is a multi-phase synchronous 218 The DA9211/DA9212/DA9213/DA9214/DA9215 is a multi-phase synchronous
219 step down converter 12A or 16A DC-DC Buck controlled through an I2C 219 step down converter 12A or 16A DC-DC Buck controlled through an I2C
220 interface. 220 interface.
221 221
@@ -407,13 +407,13 @@ config REGULATOR_MAX77686
407 Exynos-4 chips to control VARM and VINT voltages. 407 Exynos-4 chips to control VARM and VINT voltages.
408 408
409config REGULATOR_MAX77693 409config REGULATOR_MAX77693
410 tristate "Maxim MAX77693 regulator" 410 tristate "Maxim 77693/77843 regulator"
411 depends on MFD_MAX77693 411 depends on (MFD_MAX77693 || MFD_MAX77843)
412 help 412 help
413 This driver controls a Maxim 77693 regulator via I2C bus. 413 This driver controls a Maxim 77693/77843 regulators via I2C bus.
414 The regulators include two LDOs, 'SAFEOUT1', 'SAFEOUT2' 414 The regulators include two LDOs, 'SAFEOUT1', 'SAFEOUT2'
415 and one current regulator 'CHARGER'. This is suitable for 415 and one current regulator 'CHARGER'. This is suitable for
416 Exynos-4x12 chips. 416 Exynos-4x12 (MAX77693) or Exynos5433 (MAX77843) SoC chips.
417 417
418config REGULATOR_MAX77802 418config REGULATOR_MAX77802
419 tristate "Maxim 77802 regulator" 419 tristate "Maxim 77802 regulator"
@@ -424,14 +424,6 @@ config REGULATOR_MAX77802
424 Exynos5420/Exynos5800 SoCs to control various voltages. 424 Exynos5420/Exynos5800 SoCs to control various voltages.
425 It includes support for control of voltage and ramp speed. 425 It includes support for control of voltage and ramp speed.
426 426
427config REGULATOR_MAX77843
428 tristate "Maxim 77843 regulator"
429 depends on MFD_MAX77843
430 help
431 This driver controls a Maxim 77843 regulator.
432 The regulator include two 'SAFEOUT' for USB(Universal Serial Bus)
433 This is suitable for Exynos5433 SoC chips.
434
435config REGULATOR_MC13XXX_CORE 427config REGULATOR_MC13XXX_CORE
436 tristate 428 tristate
437 429
@@ -451,6 +443,15 @@ config REGULATOR_MC13892
451 Say y here to support the regulators found on the Freescale MC13892 443 Say y here to support the regulators found on the Freescale MC13892
452 PMIC. 444 PMIC.
453 445
446config REGULATOR_MT6311
447 tristate "MediaTek MT6311 PMIC"
448 depends on I2C
449 help
450 Say y here to select this option to enable the power regulator of
451 MediaTek MT6311 PMIC.
452 This driver supports the control of different power rails of device
453 through regulator interface.
454
454config REGULATOR_MT6397 455config REGULATOR_MT6397
455 tristate "MediaTek MT6397 PMIC" 456 tristate "MediaTek MT6397 PMIC"
456 depends on MFD_MT6397 457 depends on MFD_MT6397
@@ -522,6 +523,18 @@ config REGULATOR_QCOM_RPM
522 Qualcomm RPM as a module. The module will be named 523 Qualcomm RPM as a module. The module will be named
523 "qcom_rpm-regulator". 524 "qcom_rpm-regulator".
524 525
526config REGULATOR_QCOM_SMD_RPM
527 tristate "Qualcomm SMD based RPM regulator driver"
528 depends on QCOM_SMD_RPM
529 help
530 If you say yes to this option, support will be included for the
531 regulators exposed by the Resource Power Manager found in Qualcomm
532 8974 based devices.
533
534 Say M here if you want to include support for the regulators on the
535 Qualcomm RPM as a module. The module will be named
536 "qcom_smd-regulator".
537
525config REGULATOR_QCOM_SPMI 538config REGULATOR_QCOM_SPMI
526 tristate "Qualcomm SPMI regulator driver" 539 tristate "Qualcomm SPMI regulator driver"
527 depends on SPMI || COMPILE_TEST 540 depends on SPMI || COMPILE_TEST
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 91bf76267404..0f8174913c17 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -56,12 +56,13 @@ obj-$(CONFIG_REGULATOR_MAX8998) += max8998.o
56obj-$(CONFIG_REGULATOR_MAX77686) += max77686.o 56obj-$(CONFIG_REGULATOR_MAX77686) += max77686.o
57obj-$(CONFIG_REGULATOR_MAX77693) += max77693.o 57obj-$(CONFIG_REGULATOR_MAX77693) += max77693.o
58obj-$(CONFIG_REGULATOR_MAX77802) += max77802.o 58obj-$(CONFIG_REGULATOR_MAX77802) += max77802.o
59obj-$(CONFIG_REGULATOR_MAX77843) += max77843.o
60obj-$(CONFIG_REGULATOR_MC13783) += mc13783-regulator.o 59obj-$(CONFIG_REGULATOR_MC13783) += mc13783-regulator.o
61obj-$(CONFIG_REGULATOR_MC13892) += mc13892-regulator.o 60obj-$(CONFIG_REGULATOR_MC13892) += mc13892-regulator.o
62obj-$(CONFIG_REGULATOR_MC13XXX_CORE) += mc13xxx-regulator-core.o 61obj-$(CONFIG_REGULATOR_MC13XXX_CORE) += mc13xxx-regulator-core.o
62obj-$(CONFIG_REGULATOR_MT6311) += mt6311-regulator.o
63obj-$(CONFIG_REGULATOR_MT6397) += mt6397-regulator.o 63obj-$(CONFIG_REGULATOR_MT6397) += mt6397-regulator.o
64obj-$(CONFIG_REGULATOR_QCOM_RPM) += qcom_rpm-regulator.o 64obj-$(CONFIG_REGULATOR_QCOM_RPM) += qcom_rpm-regulator.o
65obj-$(CONFIG_REGULATOR_QCOM_SMD_RPM) += qcom_smd-regulator.o
65obj-$(CONFIG_REGULATOR_QCOM_SPMI) += qcom_spmi-regulator.o 66obj-$(CONFIG_REGULATOR_QCOM_SPMI) += qcom_spmi-regulator.o
66obj-$(CONFIG_REGULATOR_PALMAS) += palmas-regulator.o 67obj-$(CONFIG_REGULATOR_PALMAS) += palmas-regulator.o
67obj-$(CONFIG_REGULATOR_PFUZE100) += pfuze100-regulator.o 68obj-$(CONFIG_REGULATOR_PFUZE100) += pfuze100-regulator.o
diff --git a/drivers/regulator/act8865-regulator.c b/drivers/regulator/act8865-regulator.c
index 2ff73d72ca34..896db168e4bd 100644
--- a/drivers/regulator/act8865-regulator.c
+++ b/drivers/regulator/act8865-regulator.c
@@ -530,7 +530,6 @@ MODULE_DEVICE_TABLE(i2c, act8865_ids);
530static struct i2c_driver act8865_pmic_driver = { 530static struct i2c_driver act8865_pmic_driver = {
531 .driver = { 531 .driver = {
532 .name = "act8865", 532 .name = "act8865",
533 .owner = THIS_MODULE,
534 }, 533 },
535 .probe = act8865_pmic_probe, 534 .probe = act8865_pmic_probe,
536 .id_table = act8865_ids, 535 .id_table = act8865_ids,
diff --git a/drivers/regulator/ad5398.c b/drivers/regulator/ad5398.c
index 48016a050d5f..ea50a886ba63 100644
--- a/drivers/regulator/ad5398.c
+++ b/drivers/regulator/ad5398.c
@@ -275,4 +275,3 @@ module_exit(ad5398_exit);
275MODULE_DESCRIPTION("AD5398 and AD5821 current regulator driver"); 275MODULE_DESCRIPTION("AD5398 and AD5821 current regulator driver");
276MODULE_AUTHOR("Sonic Zhang"); 276MODULE_AUTHOR("Sonic Zhang");
277MODULE_LICENSE("GPL"); 277MODULE_LICENSE("GPL");
278MODULE_ALIAS("i2c:ad5398-regulator");
diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
index 646829132b59..01bf3476a791 100644
--- a/drivers/regulator/axp20x-regulator.c
+++ b/drivers/regulator/axp20x-regulator.c
@@ -405,3 +405,4 @@ module_platform_driver(axp20x_regulator_driver);
405MODULE_LICENSE("GPL v2"); 405MODULE_LICENSE("GPL v2");
406MODULE_AUTHOR("Carlo Caione <carlo@caione.org>"); 406MODULE_AUTHOR("Carlo Caione <carlo@caione.org>");
407MODULE_DESCRIPTION("Regulator Driver for AXP20X PMIC"); 407MODULE_DESCRIPTION("Regulator Driver for AXP20X PMIC");
408MODULE_ALIAS("platform:axp20x-regulator");
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index c9f72019bd68..de9f272a0faf 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -109,6 +109,12 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev,
109static struct regulator *create_regulator(struct regulator_dev *rdev, 109static struct regulator *create_regulator(struct regulator_dev *rdev,
110 struct device *dev, 110 struct device *dev,
111 const char *supply_name); 111 const char *supply_name);
112static void _regulator_put(struct regulator *regulator);
113
114static struct regulator_dev *dev_to_rdev(struct device *dev)
115{
116 return container_of(dev, struct regulator_dev, dev);
117}
112 118
113static const char *rdev_get_name(struct regulator_dev *rdev) 119static const char *rdev_get_name(struct regulator_dev *rdev)
114{ 120{
@@ -295,7 +301,7 @@ static int regulator_check_drms(struct regulator_dev *rdev)
295 return -ENODEV; 301 return -ENODEV;
296 } 302 }
297 if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_DRMS)) { 303 if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_DRMS)) {
298 rdev_err(rdev, "operation not allowed\n"); 304 rdev_dbg(rdev, "operation not allowed\n");
299 return -EPERM; 305 return -EPERM;
300 } 306 }
301 return 0; 307 return 0;
@@ -640,6 +646,8 @@ static int drms_uA_update(struct regulator_dev *rdev)
640 int current_uA = 0, output_uV, input_uV, err; 646 int current_uA = 0, output_uV, input_uV, err;
641 unsigned int mode; 647 unsigned int mode;
642 648
649 lockdep_assert_held_once(&rdev->mutex);
650
643 /* 651 /*
644 * first check to see if we can set modes at all, otherwise just 652 * first check to see if we can set modes at all, otherwise just
645 * tell the consumer everything is OK. 653 * tell the consumer everything is OK.
@@ -760,6 +768,8 @@ static int suspend_set_state(struct regulator_dev *rdev,
760/* locks held by caller */ 768/* locks held by caller */
761static int suspend_prepare(struct regulator_dev *rdev, suspend_state_t state) 769static int suspend_prepare(struct regulator_dev *rdev, suspend_state_t state)
762{ 770{
771 lockdep_assert_held_once(&rdev->mutex);
772
763 if (!rdev->constraints) 773 if (!rdev->constraints)
764 return -EINVAL; 774 return -EINVAL;
765 775
@@ -1081,6 +1091,15 @@ static int set_machine_constraints(struct regulator_dev *rdev,
1081 } 1091 }
1082 } 1092 }
1083 1093
1094 if (rdev->constraints->over_current_protection
1095 && ops->set_over_current_protection) {
1096 ret = ops->set_over_current_protection(rdev);
1097 if (ret < 0) {
1098 rdev_err(rdev, "failed to set over current protection\n");
1099 goto out;
1100 }
1101 }
1102
1084 print_constraints(rdev); 1103 print_constraints(rdev);
1085 return 0; 1104 return 0;
1086out: 1105out:
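
The new block above ties the over_current_protection machine constraint to an optional set_over_current_protection() callback. As a rough sketch only (the names below are invented for illustration and are not taken from the patch; in practice the constraint flag would typically come from the regulator's DT properties), a regulator driver and its constraints would pair up like this:

static int demo_set_ocp(struct regulator_dev *rdev)
{
        /* program the hardware over-current trip point here */
        return 0;
}

static const struct regulator_ops demo_ops = {
        .set_over_current_protection = demo_set_ocp,
        /* ...the usual enable/disable/voltage ops... */
};

static struct regulation_constraints demo_constraints = {
        .over_current_protection = true, /* the core then calls the hook */
};
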
@@ -1105,6 +1124,9 @@ static int set_supply(struct regulator_dev *rdev,
1105 1124
1106 rdev_info(rdev, "supplied by %s\n", rdev_get_name(supply_rdev)); 1125 rdev_info(rdev, "supplied by %s\n", rdev_get_name(supply_rdev));
1107 1126
1127 if (!try_module_get(supply_rdev->owner))
1128 return -ENODEV;
1129
1108 rdev->supply = create_regulator(supply_rdev, &rdev->dev, "SUPPLY"); 1130 rdev->supply = create_regulator(supply_rdev, &rdev->dev, "SUPPLY");
1109 if (rdev->supply == NULL) { 1131 if (rdev->supply == NULL) {
1110 err = -ENOMEM; 1132 err = -ENOMEM;
@@ -1381,9 +1403,13 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
1381 } 1403 }
1382 1404
1383 if (!r) { 1405 if (!r) {
1384 dev_err(dev, "Failed to resolve %s-supply for %s\n", 1406 if (have_full_constraints()) {
1385 rdev->supply_name, rdev->desc->name); 1407 r = dummy_regulator_rdev;
1386 return -EPROBE_DEFER; 1408 } else {
1409 dev_err(dev, "Failed to resolve %s-supply for %s\n",
1410 rdev->supply_name, rdev->desc->name);
1411 return -EPROBE_DEFER;
1412 }
1387 } 1413 }
1388 1414
1389 /* Recursively resolve the supply of the supply */ 1415 /* Recursively resolve the supply of the supply */
@@ -1398,8 +1424,11 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
1398 /* Cascade always-on state to supply */ 1424 /* Cascade always-on state to supply */
1399 if (_regulator_is_enabled(rdev)) { 1425 if (_regulator_is_enabled(rdev)) {
1400 ret = regulator_enable(rdev->supply); 1426 ret = regulator_enable(rdev->supply);
1401 if (ret < 0) 1427 if (ret < 0) {
1428 if (rdev->supply)
1429 _regulator_put(rdev->supply);
1402 return ret; 1430 return ret;
1431 }
1403 } 1432 }
1404 1433
1405 return 0; 1434 return 0;
@@ -1584,9 +1613,11 @@ static void _regulator_put(struct regulator *regulator)
1584{ 1613{
1585 struct regulator_dev *rdev; 1614 struct regulator_dev *rdev;
1586 1615
1587 if (regulator == NULL || IS_ERR(regulator)) 1616 if (IS_ERR_OR_NULL(regulator))
1588 return; 1617 return;
1589 1618
1619 lockdep_assert_held_once(&regulator_list_mutex);
1620
1590 rdev = regulator->rdev; 1621 rdev = regulator->rdev;
1591 1622
1592 debugfs_remove_recursive(regulator->debugfs); 1623 debugfs_remove_recursive(regulator->debugfs);
@@ -1595,14 +1626,15 @@ static void _regulator_put(struct regulator *regulator)
1595 if (regulator->dev) 1626 if (regulator->dev)
1596 sysfs_remove_link(&rdev->dev.kobj, regulator->supply_name); 1627 sysfs_remove_link(&rdev->dev.kobj, regulator->supply_name);
1597 mutex_lock(&rdev->mutex); 1628 mutex_lock(&rdev->mutex);
1598 kfree(regulator->supply_name);
1599 list_del(&regulator->list); 1629 list_del(&regulator->list);
1600 kfree(regulator);
1601 1630
1602 rdev->open_count--; 1631 rdev->open_count--;
1603 rdev->exclusive = 0; 1632 rdev->exclusive = 0;
1604 mutex_unlock(&rdev->mutex); 1633 mutex_unlock(&rdev->mutex);
1605 1634
1635 kfree(regulator->supply_name);
1636 kfree(regulator);
1637
1606 module_put(rdev->owner); 1638 module_put(rdev->owner);
1607} 1639}
1608 1640
@@ -1965,6 +1997,8 @@ static int _regulator_enable(struct regulator_dev *rdev)
1965{ 1997{
1966 int ret; 1998 int ret;
1967 1999
2000 lockdep_assert_held_once(&rdev->mutex);
2001
1968 /* check voltage and requested load before enabling */ 2002 /* check voltage and requested load before enabling */
1969 if (rdev->constraints && 2003 if (rdev->constraints &&
1970 (rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_DRMS)) 2004 (rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_DRMS))
@@ -2065,6 +2099,8 @@ static int _regulator_disable(struct regulator_dev *rdev)
2065{ 2099{
2066 int ret = 0; 2100 int ret = 0;
2067 2101
2102 lockdep_assert_held_once(&rdev->mutex);
2103
2068 if (WARN(rdev->use_count <= 0, 2104 if (WARN(rdev->use_count <= 0,
2069 "unbalanced disables for %s\n", rdev_get_name(rdev))) 2105 "unbalanced disables for %s\n", rdev_get_name(rdev)))
2070 return -EIO; 2106 return -EIO;
@@ -2143,6 +2179,8 @@ static int _regulator_force_disable(struct regulator_dev *rdev)
2143{ 2179{
2144 int ret = 0; 2180 int ret = 0;
2145 2181
2182 lockdep_assert_held_once(&rdev->mutex);
2183
2146 ret = _notifier_call_chain(rdev, REGULATOR_EVENT_FORCE_DISABLE | 2184 ret = _notifier_call_chain(rdev, REGULATOR_EVENT_FORCE_DISABLE |
2147 REGULATOR_EVENT_PRE_DISABLE, NULL); 2185 REGULATOR_EVENT_PRE_DISABLE, NULL);
2148 if (ret & NOTIFY_STOP_MASK) 2186 if (ret & NOTIFY_STOP_MASK)
@@ -2711,7 +2749,7 @@ int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV)
2711 goto out; 2749 goto out;
2712 2750
2713 /* If we're trying to set a range that overlaps the current voltage, 2751 /* If we're trying to set a range that overlaps the current voltage,
2714 * return succesfully even though the regulator does not support 2752 * return successfully even though the regulator does not support
2715 * changing the voltage. 2753 * changing the voltage.
2716 */ 2754 */
2717 if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) { 2755 if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) {
@@ -3439,6 +3477,8 @@ EXPORT_SYMBOL_GPL(regulator_bulk_free);
3439int regulator_notifier_call_chain(struct regulator_dev *rdev, 3477int regulator_notifier_call_chain(struct regulator_dev *rdev,
3440 unsigned long event, void *data) 3478 unsigned long event, void *data)
3441{ 3479{
3480 lockdep_assert_held_once(&rdev->mutex);
3481
3442 _notifier_call_chain(rdev, event, data); 3482 _notifier_call_chain(rdev, event, data);
3443 return NOTIFY_DONE; 3483 return NOTIFY_DONE;
3444 3484
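
The lockdep_assert_held_once() added here makes the locking contract explicit: whoever raises events through regulator_notifier_call_chain() is expected to hold the rdev mutex. A minimal illustrative caller (not taken from the patch):

        mutex_lock(&rdev->mutex);
        regulator_notifier_call_chain(rdev, REGULATOR_EVENT_OVER_TEMP, NULL);
        mutex_unlock(&rdev->mutex);
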
@@ -3583,6 +3623,9 @@ static const struct attribute_group *regulator_dev_groups[] = {
3583static void regulator_dev_release(struct device *dev) 3623static void regulator_dev_release(struct device *dev)
3584{ 3624{
3585 struct regulator_dev *rdev = dev_get_drvdata(dev); 3625 struct regulator_dev *rdev = dev_get_drvdata(dev);
3626
3627 kfree(rdev->constraints);
3628 of_node_put(rdev->dev.of_node);
3586 kfree(rdev); 3629 kfree(rdev);
3587} 3630}
3588 3631
@@ -3813,11 +3856,9 @@ void regulator_unregister(struct regulator_dev *rdev)
3813 WARN_ON(rdev->open_count); 3856 WARN_ON(rdev->open_count);
3814 unset_regulator_supplies(rdev); 3857 unset_regulator_supplies(rdev);
3815 list_del(&rdev->list); 3858 list_del(&rdev->list);
3816 kfree(rdev->constraints); 3859 mutex_unlock(&regulator_list_mutex);
3817 regulator_ena_gpio_free(rdev); 3860 regulator_ena_gpio_free(rdev);
3818 of_node_put(rdev->dev.of_node);
3819 device_unregister(&rdev->dev); 3861 device_unregister(&rdev->dev);
3820 mutex_unlock(&regulator_list_mutex);
3821} 3862}
3822EXPORT_SYMBOL_GPL(regulator_unregister); 3863EXPORT_SYMBOL_GPL(regulator_unregister);
3823 3864
@@ -4136,13 +4177,57 @@ static int __init regulator_init(void)
4136/* init early to allow our consumers to complete system booting */ 4177/* init early to allow our consumers to complete system booting */
4137core_initcall(regulator_init); 4178core_initcall(regulator_init);
4138 4179
4139static int __init regulator_init_complete(void) 4180static int __init regulator_late_cleanup(struct device *dev, void *data)
4140{ 4181{
4141 struct regulator_dev *rdev; 4182 struct regulator_dev *rdev = dev_to_rdev(dev);
4142 const struct regulator_ops *ops; 4183 const struct regulator_ops *ops = rdev->desc->ops;
4143 struct regulation_constraints *c; 4184 struct regulation_constraints *c = rdev->constraints;
4144 int enabled, ret; 4185 int enabled, ret;
4145 4186
4187 if (c && c->always_on)
4188 return 0;
4189
4190 if (c && !(c->valid_ops_mask & REGULATOR_CHANGE_STATUS))
4191 return 0;
4192
4193 mutex_lock(&rdev->mutex);
4194
4195 if (rdev->use_count)
4196 goto unlock;
4197
4198 /* If we can't read the status assume it's on. */
4199 if (ops->is_enabled)
4200 enabled = ops->is_enabled(rdev);
4201 else
4202 enabled = 1;
4203
4204 if (!enabled)
4205 goto unlock;
4206
4207 if (have_full_constraints()) {
4208 /* We log since this may kill the system if it goes
4209 * wrong. */
4210 rdev_info(rdev, "disabling\n");
4211 ret = _regulator_do_disable(rdev);
4212 if (ret != 0)
4213 rdev_err(rdev, "couldn't disable: %d\n", ret);
4214 } else {
4215 /* The intention is that in future we will
4216 * assume that full constraints are provided
4217 * so warn even if we aren't going to do
4218 * anything here.
4219 */
4220 rdev_warn(rdev, "incomplete constraints, leaving on\n");
4221 }
4222
4223unlock:
4224 mutex_unlock(&rdev->mutex);
4225
4226 return 0;
4227}
4228
4229static int __init regulator_init_complete(void)
4230{
4146 /* 4231 /*
4147 * Since DT doesn't provide an idiomatic mechanism for 4232 * Since DT doesn't provide an idiomatic mechanism for
4148 * enabling full constraints and since it's much more natural 4233 * enabling full constraints and since it's much more natural
@@ -4152,58 +4237,13 @@ static int __init regulator_init_complete(void)
4152 if (of_have_populated_dt()) 4237 if (of_have_populated_dt())
4153 has_full_constraints = true; 4238 has_full_constraints = true;
4154 4239
4155 mutex_lock(&regulator_list_mutex);
4156
4157 /* If we have a full configuration then disable any regulators 4240 /* If we have a full configuration then disable any regulators
4158 * we have permission to change the status for and which are 4241 * we have permission to change the status for and which are
4159 * not in use or always_on. This is effectively the default 4242 * not in use or always_on. This is effectively the default
4160 * for DT and ACPI as they have full constraints. 4243 * for DT and ACPI as they have full constraints.
4161 */ 4244 */
4162 list_for_each_entry(rdev, &regulator_list, list) { 4245 class_for_each_device(&regulator_class, NULL, NULL,
4163 ops = rdev->desc->ops; 4246 regulator_late_cleanup);
4164 c = rdev->constraints;
4165
4166 if (c && c->always_on)
4167 continue;
4168
4169 if (c && !(c->valid_ops_mask & REGULATOR_CHANGE_STATUS))
4170 continue;
4171
4172 mutex_lock(&rdev->mutex);
4173
4174 if (rdev->use_count)
4175 goto unlock;
4176
4177 /* If we can't read the status assume it's on. */
4178 if (ops->is_enabled)
4179 enabled = ops->is_enabled(rdev);
4180 else
4181 enabled = 1;
4182
4183 if (!enabled)
4184 goto unlock;
4185
4186 if (have_full_constraints()) {
4187 /* We log since this may kill the system if it
4188 * goes wrong. */
4189 rdev_info(rdev, "disabling\n");
4190 ret = _regulator_do_disable(rdev);
4191 if (ret != 0)
4192 rdev_err(rdev, "couldn't disable: %d\n", ret);
4193 } else {
4194 /* The intention is that in future we will
4195 * assume that full constraints are provided
4196 * so warn even if we aren't going to do
4197 * anything here.
4198 */
4199 rdev_warn(rdev, "incomplete constraints, leaving on\n");
4200 }
4201
4202unlock:
4203 mutex_unlock(&rdev->mutex);
4204 }
4205
4206 mutex_unlock(&regulator_list_mutex);
4207 4247
4208 return 0; 4248 return 0;
4209} 4249}
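
The late-cleanup rework above is the main structural change in core.c: regulator_init_complete() no longer walks the private regulator_list but lets the driver core iterate the regulator class. A minimal sketch of that pattern, assuming code living inside core.c (dev_to_rdev() and regulator_class are both static there; the callback name below is made up):

static int dump_one_regulator(struct device *dev, void *data)
{
        struct regulator_dev *rdev = dev_to_rdev(dev);

        pr_info("regulator: %s\n", rdev->desc->name);
        return 0;       /* a non-zero return would stop the iteration */
}

/* visits every registered regulator without touching regulator_list */
class_for_each_device(&regulator_class, NULL, NULL, dump_one_regulator);
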
diff --git a/drivers/regulator/da9062-regulator.c b/drivers/regulator/da9062-regulator.c
index dd76da09b3c7..5638fe8d759d 100644
--- a/drivers/regulator/da9062-regulator.c
+++ b/drivers/regulator/da9062-regulator.c
@@ -818,7 +818,6 @@ static int da9062_regulator_probe(struct platform_device *pdev)
818static struct platform_driver da9062_regulator_driver = { 818static struct platform_driver da9062_regulator_driver = {
819 .driver = { 819 .driver = {
820 .name = "da9062-regulators", 820 .name = "da9062-regulators",
821 .owner = THIS_MODULE,
822 }, 821 },
823 .probe = da9062_regulator_probe, 822 .probe = da9062_regulator_probe,
824}; 823};
diff --git a/drivers/regulator/da9210-regulator.c b/drivers/regulator/da9210-regulator.c
index f0489cb9018b..b3517830edb6 100644
--- a/drivers/regulator/da9210-regulator.c
+++ b/drivers/regulator/da9210-regulator.c
@@ -22,6 +22,8 @@
22#include <linux/i2c.h> 22#include <linux/i2c.h>
23#include <linux/module.h> 23#include <linux/module.h>
24#include <linux/init.h> 24#include <linux/init.h>
25#include <linux/interrupt.h>
26#include <linux/irq.h>
25#include <linux/slab.h> 27#include <linux/slab.h>
26#include <linux/regulator/driver.h> 28#include <linux/regulator/driver.h>
27#include <linux/regulator/machine.h> 29#include <linux/regulator/machine.h>
@@ -120,6 +122,55 @@ static int da9210_get_current_limit(struct regulator_dev *rdev)
120 return da9210_buck_limits[sel]; 122 return da9210_buck_limits[sel];
121} 123}
122 124
125static irqreturn_t da9210_irq_handler(int irq, void *data)
126{
127 struct da9210 *chip = data;
128 unsigned int val, handled = 0;
129 int error, ret = IRQ_NONE;
130
131 error = regmap_read(chip->regmap, DA9210_REG_EVENT_B, &val);
132 if (error < 0)
133 goto error_i2c;
134
135 if (val & DA9210_E_OVCURR) {
136 regulator_notifier_call_chain(chip->rdev,
137 REGULATOR_EVENT_OVER_CURRENT,
138 NULL);
139 handled |= DA9210_E_OVCURR;
140 }
141 if (val & DA9210_E_NPWRGOOD) {
142 regulator_notifier_call_chain(chip->rdev,
143 REGULATOR_EVENT_UNDER_VOLTAGE,
144 NULL);
145 handled |= DA9210_E_NPWRGOOD;
146 }
147 if (val & (DA9210_E_TEMP_WARN | DA9210_E_TEMP_CRIT)) {
148 regulator_notifier_call_chain(chip->rdev,
149 REGULATOR_EVENT_OVER_TEMP, NULL);
150 handled |= val & (DA9210_E_TEMP_WARN | DA9210_E_TEMP_CRIT);
151 }
152 if (val & DA9210_E_VMAX) {
153 regulator_notifier_call_chain(chip->rdev,
154 REGULATOR_EVENT_REGULATION_OUT,
155 NULL);
156 handled |= DA9210_E_VMAX;
157 }
158 if (handled) {
159 /* Clear handled events */
160 error = regmap_write(chip->regmap, DA9210_REG_EVENT_B, handled);
161 if (error < 0)
162 goto error_i2c;
163
164 ret = IRQ_HANDLED;
165 }
166
167 return ret;
168
169error_i2c:
170 dev_err(regmap_get_device(chip->regmap), "I2C error : %d\n", error);
171 return ret;
172}
173
123/* 174/*
124 * I2C driver interface functions 175 * I2C driver interface functions
125 */ 176 */
@@ -168,6 +219,30 @@ static int da9210_i2c_probe(struct i2c_client *i2c,
168 } 219 }
169 220
170 chip->rdev = rdev; 221 chip->rdev = rdev;
222 if (i2c->irq) {
223 error = devm_request_threaded_irq(&i2c->dev, i2c->irq, NULL,
224 da9210_irq_handler,
225 IRQF_TRIGGER_LOW |
226 IRQF_ONESHOT | IRQF_SHARED,
227 "da9210", chip);
228 if (error) {
229 dev_err(&i2c->dev, "Failed to request IRQ%u: %d\n",
230 i2c->irq, error);
231 return error;
232 }
233
234 error = regmap_update_bits(chip->regmap, DA9210_REG_MASK_B,
235 DA9210_M_OVCURR | DA9210_M_NPWRGOOD |
236 DA9210_M_TEMP_WARN |
237 DA9210_M_TEMP_CRIT | DA9210_M_VMAX, 0);
238 if (error < 0) {
239 dev_err(&i2c->dev, "Failed to update mask reg: %d\n",
240 error);
241 return error;
242 }
243 } else {
244 dev_warn(&i2c->dev, "No IRQ configured\n");
245 }
171 246
172 i2c_set_clientdata(i2c, chip); 247 i2c_set_clientdata(i2c, chip);
173 248
@@ -184,7 +259,6 @@ MODULE_DEVICE_TABLE(i2c, da9210_i2c_id);
184static struct i2c_driver da9210_regulator_driver = { 259static struct i2c_driver da9210_regulator_driver = {
185 .driver = { 260 .driver = {
186 .name = "da9210", 261 .name = "da9210",
187 .owner = THIS_MODULE,
188 }, 262 },
189 .probe = da9210_i2c_probe, 263 .probe = da9210_i2c_probe,
190 .id_table = da9210_i2c_id, 264 .id_table = da9210_i2c_id,
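
The threaded IRQ handler added above only translates DA9210 event bits into regulator notifier events; a consumer still has to register for them. An illustrative consumer-side sketch (callback and variable names are placeholders, not part of the patch):

static int demo_reg_event(struct notifier_block *nb,
                          unsigned long event, void *data)
{
        if (event & REGULATOR_EVENT_OVER_CURRENT)
                pr_warn("da9210: over-current reported\n");
        if (event & REGULATOR_EVENT_UNDER_VOLTAGE)
                pr_warn("da9210: power-good lost\n");
        return NOTIFY_OK;
}

static struct notifier_block demo_nb = {
        .notifier_call = demo_reg_event,
};

/* after regulator_get() in the consumer's probe */
ret = regulator_register_notifier(reg, &demo_nb);
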
diff --git a/drivers/regulator/da9211-regulator.c b/drivers/regulator/da9211-regulator.c
index df79e4b1946e..04ef65b7eb3d 100644
--- a/drivers/regulator/da9211-regulator.c
+++ b/drivers/regulator/da9211-regulator.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * da9211-regulator.c - Regulator device driver for DA9211/DA9213 2 * da9211-regulator.c - Regulator device driver for DA9211/DA9213/DA9215
3 * Copyright (C) 2014 Dialog Semiconductor Ltd. 3 * Copyright (C) 2015 Dialog Semiconductor Ltd.
4 * 4 *
5 * This library is free software; you can redistribute it and/or 5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Library General Public 6 * modify it under the terms of the GNU Library General Public
@@ -32,6 +32,7 @@
32/* DEVICE IDs */ 32/* DEVICE IDs */
33#define DA9211_DEVICE_ID 0x22 33#define DA9211_DEVICE_ID 0x22
34#define DA9213_DEVICE_ID 0x23 34#define DA9213_DEVICE_ID 0x23
35#define DA9215_DEVICE_ID 0x24
35 36
36#define DA9211_BUCK_MODE_SLEEP 1 37#define DA9211_BUCK_MODE_SLEEP 1
37#define DA9211_BUCK_MODE_SYNC 2 38#define DA9211_BUCK_MODE_SYNC 2
@@ -90,6 +91,13 @@ static const int da9213_current_limits[] = {
90 3000000, 3200000, 3400000, 3600000, 3800000, 4000000, 4200000, 4400000, 91 3000000, 3200000, 3400000, 3600000, 3800000, 4000000, 4200000, 4400000,
91 4600000, 4800000, 5000000, 5200000, 5400000, 5600000, 5800000, 6000000 92 4600000, 4800000, 5000000, 5200000, 5400000, 5600000, 5800000, 6000000
92}; 93};
 94/* Current limits for the DA9215 buck (uA); indices
 95 * correspond to register values
96 */
97static const int da9215_current_limits[] = {
98 4000000, 4200000, 4400000, 4600000, 4800000, 5000000, 5200000, 5400000,
99 5600000, 5800000, 6000000, 6200000, 6400000, 6600000, 6800000, 7000000
100};
93 101
94static unsigned int da9211_buck_get_mode(struct regulator_dev *rdev) 102static unsigned int da9211_buck_get_mode(struct regulator_dev *rdev)
95{ 103{
@@ -157,6 +165,10 @@ static int da9211_set_current_limit(struct regulator_dev *rdev, int min,
157 current_limits = da9213_current_limits; 165 current_limits = da9213_current_limits;
158 max_size = ARRAY_SIZE(da9213_current_limits)-1; 166 max_size = ARRAY_SIZE(da9213_current_limits)-1;
159 break; 167 break;
168 case DA9215:
169 current_limits = da9215_current_limits;
170 max_size = ARRAY_SIZE(da9215_current_limits)-1;
171 break;
160 default: 172 default:
161 return -EINVAL; 173 return -EINVAL;
162 } 174 }
@@ -189,6 +201,9 @@ static int da9211_get_current_limit(struct regulator_dev *rdev)
189 case DA9213: 201 case DA9213:
190 current_limits = da9213_current_limits; 202 current_limits = da9213_current_limits;
191 break; 203 break;
204 case DA9215:
205 current_limits = da9215_current_limits;
206 break;
192 default: 207 default:
193 return -EINVAL; 208 return -EINVAL;
194 } 209 }
@@ -350,13 +365,11 @@ static int da9211_regulator_init(struct da9211 *chip)
350 /* If configuration for 1/2 bucks is different between platform data 365 /* If configuration for 1/2 bucks is different between platform data
351 * and the register, driver should exit. 366 * and the register, driver should exit.
352 */ 367 */
353 if ((chip->pdata->num_buck == 2 && data == 0x40) 368 if (chip->pdata->num_buck == 1 && data == 0x00)
354 || (chip->pdata->num_buck == 1 && data == 0x00)) { 369 chip->num_regulator = 1;
355 if (data == 0) 370 else if (chip->pdata->num_buck == 2 && data != 0x00)
356 chip->num_regulator = 1; 371 chip->num_regulator = 2;
357 else 372 else {
358 chip->num_regulator = 2;
359 } else {
360 dev_err(chip->dev, "Configuration is mismatched\n"); 373 dev_err(chip->dev, "Configuration is mismatched\n");
361 return -EINVAL; 374 return -EINVAL;
362 } 375 }
@@ -438,6 +451,9 @@ static int da9211_i2c_probe(struct i2c_client *i2c,
438 case DA9213_DEVICE_ID: 451 case DA9213_DEVICE_ID:
439 chip->chip_id = DA9213; 452 chip->chip_id = DA9213;
440 break; 453 break;
454 case DA9215_DEVICE_ID:
455 chip->chip_id = DA9215;
456 break;
441 default: 457 default:
442 dev_err(chip->dev, "Unsupported device id = 0x%x.\n", data); 458 dev_err(chip->dev, "Unsupported device id = 0x%x.\n", data);
443 return -ENODEV; 459 return -ENODEV;
@@ -478,6 +494,7 @@ static int da9211_i2c_probe(struct i2c_client *i2c,
478static const struct i2c_device_id da9211_i2c_id[] = { 494static const struct i2c_device_id da9211_i2c_id[] = {
479 {"da9211", DA9211}, 495 {"da9211", DA9211},
480 {"da9213", DA9213}, 496 {"da9213", DA9213},
497 {"da9215", DA9215},
481 {}, 498 {},
482}; 499};
483MODULE_DEVICE_TABLE(i2c, da9211_i2c_id); 500MODULE_DEVICE_TABLE(i2c, da9211_i2c_id);
@@ -486,6 +503,7 @@ MODULE_DEVICE_TABLE(i2c, da9211_i2c_id);
486static const struct of_device_id da9211_dt_ids[] = { 503static const struct of_device_id da9211_dt_ids[] = {
487 { .compatible = "dlg,da9211", .data = &da9211_i2c_id[0] }, 504 { .compatible = "dlg,da9211", .data = &da9211_i2c_id[0] },
488 { .compatible = "dlg,da9213", .data = &da9211_i2c_id[1] }, 505 { .compatible = "dlg,da9213", .data = &da9211_i2c_id[1] },
506 { .compatible = "dlg,da9215", .data = &da9211_i2c_id[2] },
489 {}, 507 {},
490}; 508};
491MODULE_DEVICE_TABLE(of, da9211_dt_ids); 509MODULE_DEVICE_TABLE(of, da9211_dt_ids);
@@ -494,7 +512,6 @@ MODULE_DEVICE_TABLE(of, da9211_dt_ids);
494static struct i2c_driver da9211_regulator_driver = { 512static struct i2c_driver da9211_regulator_driver = {
495 .driver = { 513 .driver = {
496 .name = "da9211", 514 .name = "da9211",
497 .owner = THIS_MODULE,
498 .of_match_table = of_match_ptr(da9211_dt_ids), 515 .of_match_table = of_match_ptr(da9211_dt_ids),
499 }, 516 },
500 .probe = da9211_i2c_probe, 517 .probe = da9211_i2c_probe,
@@ -504,5 +521,5 @@ static struct i2c_driver da9211_regulator_driver = {
504module_i2c_driver(da9211_regulator_driver); 521module_i2c_driver(da9211_regulator_driver);
505 522
506MODULE_AUTHOR("James Ban <James.Ban.opensource@diasemi.com>"); 523MODULE_AUTHOR("James Ban <James.Ban.opensource@diasemi.com>");
507MODULE_DESCRIPTION("Regulator device driver for Dialog DA9211/DA9213"); 524MODULE_DESCRIPTION("Regulator device driver for Dialog DA9211/DA9213/DA9215");
508MODULE_LICENSE("GPL v2"); 525MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/da9211-regulator.h b/drivers/regulator/da9211-regulator.h
index 93fa9df2721c..d6ad96fc64d3 100644
--- a/drivers/regulator/da9211-regulator.h
+++ b/drivers/regulator/da9211-regulator.h
@@ -1,16 +1,16 @@
1/* 1/*
2 * da9211-regulator.h - Regulator definitions for DA9211/DA9213 2 * da9211-regulator.h - Regulator definitions for DA9211/DA9213/DA9215
3 * Copyright (C) 2014 Dialog Semiconductor Ltd. 3 * Copyright (C) 2015 Dialog Semiconductor Ltd.
4 * 4 *
5 * This library is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Library General Public 6 * modify it under the terms of the GNU General Public License
7 * License as published by the Free Software Foundation; either 7 * as published by the Free Software Foundation; either version 2
8 * version 2 of the License, or (at your option) any later version. 8 * of the License, or (at your option) any later version.
9 * 9 *
10 * This library is distributed in the hope that it will be useful, 10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * Library General Public License for more details. 13 * GNU General Public License for more details.
14 */ 14 */
15 15
16#ifndef __DA9211_REGISTERS_H__ 16#ifndef __DA9211_REGISTERS_H__
diff --git a/drivers/regulator/fan53555.c b/drivers/regulator/fan53555.c
index 42865681c00b..4940e8287df6 100644
--- a/drivers/regulator/fan53555.c
+++ b/drivers/regulator/fan53555.c
@@ -439,6 +439,7 @@ static const struct i2c_device_id fan53555_id[] = {
439 }, 439 },
440 { }, 440 { },
441}; 441};
442MODULE_DEVICE_TABLE(i2c, fan53555_id);
442 443
443static struct i2c_driver fan53555_regulator_driver = { 444static struct i2c_driver fan53555_regulator_driver = {
444 .driver = { 445 .driver = {
diff --git a/drivers/regulator/isl6271a-regulator.c b/drivers/regulator/isl6271a-regulator.c
index 6e5da95fa025..4abd8e9c81e5 100644
--- a/drivers/regulator/isl6271a-regulator.c
+++ b/drivers/regulator/isl6271a-regulator.c
@@ -156,7 +156,6 @@ MODULE_DEVICE_TABLE(i2c, isl6271a_id);
156static struct i2c_driver isl6271a_i2c_driver = { 156static struct i2c_driver isl6271a_i2c_driver = {
157 .driver = { 157 .driver = {
158 .name = "isl6271a", 158 .name = "isl6271a",
159 .owner = THIS_MODULE,
160 }, 159 },
161 .probe = isl6271a_probe, 160 .probe = isl6271a_probe,
162 .id_table = isl6271a_id, 161 .id_table = isl6271a_id,
diff --git a/drivers/regulator/isl9305.c b/drivers/regulator/isl9305.c
index 6e3a15fe00f1..257c1943e753 100644
--- a/drivers/regulator/isl9305.c
+++ b/drivers/regulator/isl9305.c
@@ -183,6 +183,7 @@ static const struct of_device_id isl9305_dt_ids[] = {
183 { .compatible = "isil,isl9305h" }, 183 { .compatible = "isil,isl9305h" },
184 {}, 184 {},
185}; 185};
186MODULE_DEVICE_TABLE(of, isl9305_dt_ids);
186#endif 187#endif
187 188
188static const struct i2c_device_id isl9305_i2c_id[] = { 189static const struct i2c_device_id isl9305_i2c_id[] = {
@@ -195,7 +196,6 @@ MODULE_DEVICE_TABLE(i2c, isl9305_i2c_id);
195static struct i2c_driver isl9305_regulator_driver = { 196static struct i2c_driver isl9305_regulator_driver = {
196 .driver = { 197 .driver = {
197 .name = "isl9305", 198 .name = "isl9305",
198 .owner = THIS_MODULE,
199 .of_match_table = of_match_ptr(isl9305_dt_ids), 199 .of_match_table = of_match_ptr(isl9305_dt_ids),
200 }, 200 },
201 .probe = isl9305_i2c_probe, 201 .probe = isl9305_i2c_probe,
diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c
index 66fd2330dca0..15c25c622edf 100644
--- a/drivers/regulator/lp3971.c
+++ b/drivers/regulator/lp3971.c
@@ -452,7 +452,6 @@ MODULE_DEVICE_TABLE(i2c, lp3971_i2c_id);
452static struct i2c_driver lp3971_i2c_driver = { 452static struct i2c_driver lp3971_i2c_driver = {
453 .driver = { 453 .driver = {
454 .name = "LP3971", 454 .name = "LP3971",
455 .owner = THIS_MODULE,
456 }, 455 },
457 .probe = lp3971_i2c_probe, 456 .probe = lp3971_i2c_probe,
458 .id_table = lp3971_i2c_id, 457 .id_table = lp3971_i2c_id,
diff --git a/drivers/regulator/lp3972.c b/drivers/regulator/lp3972.c
index aea485afcc1a..3a7e96e2c7b3 100644
--- a/drivers/regulator/lp3972.c
+++ b/drivers/regulator/lp3972.c
@@ -550,7 +550,6 @@ MODULE_DEVICE_TABLE(i2c, lp3972_i2c_id);
550static struct i2c_driver lp3972_i2c_driver = { 550static struct i2c_driver lp3972_i2c_driver = {
551 .driver = { 551 .driver = {
552 .name = "lp3972", 552 .name = "lp3972",
553 .owner = THIS_MODULE,
554 }, 553 },
555 .probe = lp3972_i2c_probe, 554 .probe = lp3972_i2c_probe,
556 .id_table = lp3972_i2c_id, 555 .id_table = lp3972_i2c_id,
diff --git a/drivers/regulator/lp872x.c b/drivers/regulator/lp872x.c
index 3de328ab41f3..e5af07208f9d 100644
--- a/drivers/regulator/lp872x.c
+++ b/drivers/regulator/lp872x.c
@@ -849,7 +849,7 @@ static struct lp872x_platform_data
849 849
850 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); 850 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
851 if (!pdata) 851 if (!pdata)
852 goto out; 852 return ERR_PTR(-ENOMEM);
853 853
854 of_property_read_u8(np, "ti,general-config", &pdata->general_config); 854 of_property_read_u8(np, "ti,general-config", &pdata->general_config);
855 if (of_find_property(np, "ti,update-config", NULL)) 855 if (of_find_property(np, "ti,update-config", NULL))
@@ -857,7 +857,7 @@ static struct lp872x_platform_data
857 857
858 pdata->dvs = devm_kzalloc(dev, sizeof(struct lp872x_dvs), GFP_KERNEL); 858 pdata->dvs = devm_kzalloc(dev, sizeof(struct lp872x_dvs), GFP_KERNEL);
859 if (!pdata->dvs) 859 if (!pdata->dvs)
860 goto out; 860 return ERR_PTR(-ENOMEM);
861 861
862 pdata->dvs->gpio = of_get_named_gpio(np, "ti,dvs-gpio", 0); 862 pdata->dvs->gpio = of_get_named_gpio(np, "ti,dvs-gpio", 0);
863 of_property_read_u8(np, "ti,dvs-vsel", (u8 *)&pdata->dvs->vsel); 863 of_property_read_u8(np, "ti,dvs-vsel", (u8 *)&pdata->dvs->vsel);
@@ -903,15 +903,21 @@ static struct lp872x_platform_data
903static int lp872x_probe(struct i2c_client *cl, const struct i2c_device_id *id) 903static int lp872x_probe(struct i2c_client *cl, const struct i2c_device_id *id)
904{ 904{
905 struct lp872x *lp; 905 struct lp872x *lp;
906 struct lp872x_platform_data *pdata;
906 int ret; 907 int ret;
907 const int lp872x_num_regulators[] = { 908 const int lp872x_num_regulators[] = {
908 [LP8720] = LP8720_NUM_REGULATORS, 909 [LP8720] = LP8720_NUM_REGULATORS,
909 [LP8725] = LP8725_NUM_REGULATORS, 910 [LP8725] = LP8725_NUM_REGULATORS,
910 }; 911 };
911 912
912 if (cl->dev.of_node) 913 if (cl->dev.of_node) {
913 cl->dev.platform_data = lp872x_populate_pdata_from_dt(&cl->dev, 914 pdata = lp872x_populate_pdata_from_dt(&cl->dev,
914 (enum lp872x_id)id->driver_data); 915 (enum lp872x_id)id->driver_data);
916 if (IS_ERR(pdata))
917 return PTR_ERR(pdata);
918 } else {
919 pdata = dev_get_platdata(&cl->dev);
920 }
915 921
916 lp = devm_kzalloc(&cl->dev, sizeof(struct lp872x), GFP_KERNEL); 922 lp = devm_kzalloc(&cl->dev, sizeof(struct lp872x), GFP_KERNEL);
917 if (!lp) 923 if (!lp)
@@ -927,7 +933,7 @@ static int lp872x_probe(struct i2c_client *cl, const struct i2c_device_id *id)
927 } 933 }
928 934
929 lp->dev = &cl->dev; 935 lp->dev = &cl->dev;
930 lp->pdata = dev_get_platdata(&cl->dev); 936 lp->pdata = pdata;
931 lp->chipid = id->driver_data; 937 lp->chipid = id->driver_data;
932 i2c_set_clientdata(cl, lp); 938 i2c_set_clientdata(cl, lp);
933 939
@@ -955,7 +961,6 @@ MODULE_DEVICE_TABLE(i2c, lp872x_ids);
955static struct i2c_driver lp872x_driver = { 961static struct i2c_driver lp872x_driver = {
956 .driver = { 962 .driver = {
957 .name = "lp872x", 963 .name = "lp872x",
958 .owner = THIS_MODULE,
959 .of_match_table = of_match_ptr(lp872x_dt_ids), 964 .of_match_table = of_match_ptr(lp872x_dt_ids),
960 }, 965 },
961 .probe = lp872x_probe, 966 .probe = lp872x_probe,
diff --git a/drivers/regulator/ltc3589.c b/drivers/regulator/ltc3589.c
index 0ce8e4e0fa73..972c386b2690 100644
--- a/drivers/regulator/ltc3589.c
+++ b/drivers/regulator/ltc3589.c
@@ -378,7 +378,7 @@ static bool ltc3589_volatile_reg(struct device *dev, unsigned int reg)
378 return false; 378 return false;
379} 379}
380 380
381static struct reg_default ltc3589_reg_defaults[] = { 381static const struct reg_default ltc3589_reg_defaults[] = {
382 { LTC3589_SCR1, 0x00 }, 382 { LTC3589_SCR1, 0x00 },
383 { LTC3589_OVEN, 0x00 }, 383 { LTC3589_OVEN, 0x00 },
384 { LTC3589_SCR2, 0x00 }, 384 { LTC3589_SCR2, 0x00 },
@@ -542,7 +542,6 @@ MODULE_DEVICE_TABLE(i2c, ltc3589_i2c_id);
542static struct i2c_driver ltc3589_driver = { 542static struct i2c_driver ltc3589_driver = {
543 .driver = { 543 .driver = {
544 .name = DRIVER_NAME, 544 .name = DRIVER_NAME,
545 .owner = THIS_MODULE,
546 }, 545 },
547 .probe = ltc3589_probe, 546 .probe = ltc3589_probe,
548 .id_table = ltc3589_i2c_id, 547 .id_table = ltc3589_i2c_id,
@@ -552,4 +551,3 @@ module_i2c_driver(ltc3589_driver);
552MODULE_AUTHOR("Philipp Zabel <p.zabel@pengutronix.de>"); 551MODULE_AUTHOR("Philipp Zabel <p.zabel@pengutronix.de>");
553MODULE_DESCRIPTION("Regulator driver for Linear Technology LTC3589(-1,2)"); 552MODULE_DESCRIPTION("Regulator driver for Linear Technology LTC3589(-1,2)");
554MODULE_LICENSE("GPL v2"); 553MODULE_LICENSE("GPL v2");
555MODULE_ALIAS("i2c:ltc3589");
diff --git a/drivers/regulator/max1586.c b/drivers/regulator/max1586.c
index d2a8c64cae42..2c1228d5796a 100644
--- a/drivers/regulator/max1586.c
+++ b/drivers/regulator/max1586.c
@@ -304,7 +304,6 @@ static struct i2c_driver max1586_pmic_driver = {
304 .probe = max1586_pmic_probe, 304 .probe = max1586_pmic_probe,
305 .driver = { 305 .driver = {
306 .name = "max1586", 306 .name = "max1586",
307 .owner = THIS_MODULE,
308 .of_match_table = of_match_ptr(max1586_of_match), 307 .of_match_table = of_match_ptr(max1586_of_match),
309 }, 308 },
310 .id_table = max1586_id, 309 .id_table = max1586_id,
diff --git a/drivers/regulator/max77693.c b/drivers/regulator/max77693.c
index 38722c8311a5..de730fd3f8a5 100644
--- a/drivers/regulator/max77693.c
+++ b/drivers/regulator/max77693.c
@@ -1,8 +1,9 @@
1/* 1/*
2 * max77693.c - Regulator driver for the Maxim 77693 2 * max77693.c - Regulator driver for the Maxim 77693 and 77843
3 * 3 *
4 * Copyright (C) 2013 Samsung Electronics 4 * Copyright (C) 2013-2015 Samsung Electronics
5 * Jonghwa Lee <jonghwa3.lee@samsung.com> 5 * Jonghwa Lee <jonghwa3.lee@samsung.com>
6 * Krzysztof Kozlowski <k.kozlowski.k@gmail.com>
6 * 7 *
7 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 9 * it under the terms of the GNU General Public License as published by
@@ -29,38 +30,64 @@
29#include <linux/regulator/driver.h> 30#include <linux/regulator/driver.h>
30#include <linux/regulator/machine.h> 31#include <linux/regulator/machine.h>
31#include <linux/mfd/max77693.h> 32#include <linux/mfd/max77693.h>
33#include <linux/mfd/max77693-common.h>
32#include <linux/mfd/max77693-private.h> 34#include <linux/mfd/max77693-private.h>
35#include <linux/mfd/max77843-private.h>
33#include <linux/regulator/of_regulator.h> 36#include <linux/regulator/of_regulator.h>
34#include <linux/regmap.h> 37#include <linux/regmap.h>
35 38
36#define CHGIN_ILIM_STEP_20mA 20000 39/*
40 * ID for MAX77843 regulators.
41 * There is no need for such for MAX77693.
42 */
43enum max77843_regulator_type {
44 MAX77843_SAFEOUT1 = 0,
45 MAX77843_SAFEOUT2,
46 MAX77843_CHARGER,
47
48 MAX77843_NUM,
49};
50
51/* Register differences between chargers: MAX77693 and MAX77843 */
52struct chg_reg_data {
53 unsigned int linear_reg;
54 unsigned int linear_mask;
55 unsigned int uA_step;
56 unsigned int min_sel;
57};
37 58
38/* 59/*
39 * CHARGER regulator - Min : 20mA, Max : 2580mA, step : 20mA 60 * MAX77693 CHARGER regulator - Min : 20mA, Max : 2580mA, step : 20mA
40 * 0x00, 0x01, 0x2, 0x03 = 60 mA 61 * 0x00, 0x01, 0x2, 0x03 = 60 mA
41 * 0x04 ~ 0x7E = (60 + (X - 3) * 20) mA 62 * 0x04 ~ 0x7E = (60 + (X - 3) * 20) mA
63 * Actually for MAX77693 the driver manipulates the maximum input current,
64 * not the fast charge current (output). This should be fixed.
65 *
66 * On MAX77843 the calculation formula is the same (except values).
67 * Fortunately it properly manipulates the fast charge current.
42 */ 68 */
43static int max77693_chg_get_current_limit(struct regulator_dev *rdev) 69static int max77693_chg_get_current_limit(struct regulator_dev *rdev)
44{ 70{
71 const struct chg_reg_data *reg_data = rdev_get_drvdata(rdev);
45 unsigned int chg_min_uA = rdev->constraints->min_uA; 72 unsigned int chg_min_uA = rdev->constraints->min_uA;
46 unsigned int chg_max_uA = rdev->constraints->max_uA; 73 unsigned int chg_max_uA = rdev->constraints->max_uA;
47 unsigned int reg, sel; 74 unsigned int reg, sel;
48 unsigned int val; 75 unsigned int val;
49 int ret; 76 int ret;
50 77
51 ret = regmap_read(rdev->regmap, MAX77693_CHG_REG_CHG_CNFG_09, &reg); 78 ret = regmap_read(rdev->regmap, reg_data->linear_reg, &reg);
52 if (ret < 0) 79 if (ret < 0)
53 return ret; 80 return ret;
54 81
55 sel = reg & CHG_CNFG_09_CHGIN_ILIM_MASK; 82 sel = reg & reg_data->linear_mask;
56 83
57 /* the first four codes for charger current are all 60mA */ 84 /* the first four codes for charger current are all 60mA */
58 if (sel <= 3) 85 if (sel <= reg_data->min_sel)
59 sel = 0; 86 sel = 0;
60 else 87 else
61 sel -= 3; 88 sel -= reg_data->min_sel;
62 89
63 val = chg_min_uA + CHGIN_ILIM_STEP_20mA * sel; 90 val = chg_min_uA + reg_data->uA_step * sel;
64 if (val > chg_max_uA) 91 if (val > chg_max_uA)
65 return -EINVAL; 92 return -EINVAL;
66 93
@@ -70,23 +97,43 @@ static int max77693_chg_get_current_limit(struct regulator_dev *rdev)
70static int max77693_chg_set_current_limit(struct regulator_dev *rdev, 97static int max77693_chg_set_current_limit(struct regulator_dev *rdev,
71 int min_uA, int max_uA) 98 int min_uA, int max_uA)
72{ 99{
100 const struct chg_reg_data *reg_data = rdev_get_drvdata(rdev);
73 unsigned int chg_min_uA = rdev->constraints->min_uA; 101 unsigned int chg_min_uA = rdev->constraints->min_uA;
74 int sel = 0; 102 int sel = 0;
75 103
76 while (chg_min_uA + CHGIN_ILIM_STEP_20mA * sel < min_uA) 104 while (chg_min_uA + reg_data->uA_step * sel < min_uA)
77 sel++; 105 sel++;
78 106
79 if (chg_min_uA + CHGIN_ILIM_STEP_20mA * sel > max_uA) 107 if (chg_min_uA + reg_data->uA_step * sel > max_uA)
80 return -EINVAL; 108 return -EINVAL;
81 109
82 /* the first four codes for charger current are all 60mA */ 110 /* the first four codes for charger current are all 60mA */
83 sel += 3; 111 sel += reg_data->min_sel;
84 112
85 return regmap_write(rdev->regmap, 113 return regmap_write(rdev->regmap, reg_data->linear_reg, sel);
86 MAX77693_CHG_REG_CHG_CNFG_09, sel);
87} 114}
88/* end of CHARGER regulator ops */ 115/* end of CHARGER regulator ops */
89 116
117/* Returns regmap suitable for given regulator on chosen device */
118static struct regmap *max77693_get_regmap(enum max77693_types type,
119 struct max77693_dev *max77693,
120 int reg_id)
121{
122 if (type == TYPE_MAX77693)
123 return max77693->regmap;
124
125 /* Else: TYPE_MAX77843 */
126 switch (reg_id) {
127 case MAX77843_SAFEOUT1:
128 case MAX77843_SAFEOUT2:
129 return max77693->regmap;
130 case MAX77843_CHARGER:
131 return max77693->regmap_chg;
132 default:
133 return max77693->regmap;
134 }
135}
136
90static const unsigned int max77693_safeout_table[] = { 137static const unsigned int max77693_safeout_table[] = {
91 4850000, 138 4850000,
92 4900000, 139 4900000,
@@ -111,7 +158,7 @@ static struct regulator_ops max77693_charger_ops = {
111 .set_current_limit = max77693_chg_set_current_limit, 158 .set_current_limit = max77693_chg_set_current_limit,
112}; 159};
113 160
114#define regulator_desc_esafeout(_num) { \ 161#define max77693_regulator_desc_esafeout(_num) { \
115 .name = "ESAFEOUT"#_num, \ 162 .name = "ESAFEOUT"#_num, \
116 .id = MAX77693_ESAFEOUT##_num, \ 163 .id = MAX77693_ESAFEOUT##_num, \
117 .of_match = of_match_ptr("ESAFEOUT"#_num), \ 164 .of_match = of_match_ptr("ESAFEOUT"#_num), \
@@ -127,9 +174,9 @@ static struct regulator_ops max77693_charger_ops = {
127 .enable_mask = SAFEOUT_CTRL_ENSAFEOUT##_num##_MASK , \ 174 .enable_mask = SAFEOUT_CTRL_ENSAFEOUT##_num##_MASK , \
128} 175}
129 176
130static const struct regulator_desc regulators[] = { 177static const struct regulator_desc max77693_supported_regulators[] = {
131 regulator_desc_esafeout(1), 178 max77693_regulator_desc_esafeout(1),
132 regulator_desc_esafeout(2), 179 max77693_regulator_desc_esafeout(2),
133 { 180 {
134 .name = "CHARGER", 181 .name = "CHARGER",
135 .id = MAX77693_CHARGER, 182 .id = MAX77693_CHARGER,
@@ -145,18 +192,86 @@ static const struct regulator_desc regulators[] = {
145 }, 192 },
146}; 193};
147 194
195static const struct chg_reg_data max77693_chg_reg_data = {
196 .linear_reg = MAX77693_CHG_REG_CHG_CNFG_09,
197 .linear_mask = CHG_CNFG_09_CHGIN_ILIM_MASK,
198 .uA_step = 20000,
199 .min_sel = 3,
200};
201
202#define max77843_regulator_desc_esafeout(num) { \
203 .name = "SAFEOUT" # num, \
204 .id = MAX77843_SAFEOUT ## num, \
205 .ops = &max77693_safeout_ops, \
206 .of_match = of_match_ptr("SAFEOUT" # num), \
207 .regulators_node = of_match_ptr("regulators"), \
208 .type = REGULATOR_VOLTAGE, \
209 .owner = THIS_MODULE, \
210 .n_voltages = ARRAY_SIZE(max77693_safeout_table), \
211 .volt_table = max77693_safeout_table, \
212 .enable_reg = MAX77843_SYS_REG_SAFEOUTCTRL, \
213 .enable_mask = MAX77843_REG_SAFEOUTCTRL_ENSAFEOUT ## num, \
214 .vsel_reg = MAX77843_SYS_REG_SAFEOUTCTRL, \
215 .vsel_mask = MAX77843_REG_SAFEOUTCTRL_SAFEOUT ## num ## _MASK, \
216}
217
218static const struct regulator_desc max77843_supported_regulators[] = {
219 [MAX77843_SAFEOUT1] = max77843_regulator_desc_esafeout(1),
220 [MAX77843_SAFEOUT2] = max77843_regulator_desc_esafeout(2),
221 [MAX77843_CHARGER] = {
222 .name = "CHARGER",
223 .id = MAX77843_CHARGER,
224 .ops = &max77693_charger_ops,
225 .of_match = of_match_ptr("CHARGER"),
226 .regulators_node = of_match_ptr("regulators"),
227 .type = REGULATOR_CURRENT,
228 .owner = THIS_MODULE,
229 .enable_reg = MAX77843_CHG_REG_CHG_CNFG_00,
230 .enable_mask = MAX77843_CHG_MASK,
231 .enable_val = MAX77843_CHG_MASK,
232 },
233};
234
235static const struct chg_reg_data max77843_chg_reg_data = {
236 .linear_reg = MAX77843_CHG_REG_CHG_CNFG_02,
237 .linear_mask = MAX77843_CHG_FAST_CHG_CURRENT_MASK,
238 .uA_step = MAX77843_CHG_FAST_CHG_CURRENT_STEP,
239 .min_sel = 2,
240};
241
148static int max77693_pmic_probe(struct platform_device *pdev) 242static int max77693_pmic_probe(struct platform_device *pdev)
149{ 243{
244 enum max77693_types type = platform_get_device_id(pdev)->driver_data;
150 struct max77693_dev *iodev = dev_get_drvdata(pdev->dev.parent); 245 struct max77693_dev *iodev = dev_get_drvdata(pdev->dev.parent);
246 const struct regulator_desc *regulators;
247 unsigned int regulators_size;
151 int i; 248 int i;
152 struct regulator_config config = { }; 249 struct regulator_config config = { };
153 250
154 config.dev = iodev->dev; 251 config.dev = iodev->dev;
155 config.regmap = iodev->regmap;
156 252
157 for (i = 0; i < ARRAY_SIZE(regulators); i++) { 253 switch (type) {
254 case TYPE_MAX77693:
255 regulators = max77693_supported_regulators;
256 regulators_size = ARRAY_SIZE(max77693_supported_regulators);
257 config.driver_data = (void *)&max77693_chg_reg_data;
258 break;
259 case TYPE_MAX77843:
260 regulators = max77843_supported_regulators;
261 regulators_size = ARRAY_SIZE(max77843_supported_regulators);
262 config.driver_data = (void *)&max77843_chg_reg_data;
263 break;
264 default:
265 dev_err(&pdev->dev, "Unsupported device type: %u\n", type);
266 return -ENODEV;
267 }
268
269 for (i = 0; i < regulators_size; i++) {
158 struct regulator_dev *rdev; 270 struct regulator_dev *rdev;
159 271
272 config.regmap = max77693_get_regmap(type, iodev,
273 regulators[i].id);
274
160 rdev = devm_regulator_register(&pdev->dev, 275 rdev = devm_regulator_register(&pdev->dev,
161 &regulators[i], &config); 276 &regulators[i], &config);
162 if (IS_ERR(rdev)) { 277 if (IS_ERR(rdev)) {
@@ -170,7 +285,8 @@ static int max77693_pmic_probe(struct platform_device *pdev)
170} 285}
171 286
172static const struct platform_device_id max77693_pmic_id[] = { 287static const struct platform_device_id max77693_pmic_id[] = {
173 {"max77693-pmic", 0}, 288 { "max77693-pmic", TYPE_MAX77693 },
289 { "max77843-regulator", TYPE_MAX77843 },
174 {}, 290 {},
175}; 291};
176 292
@@ -184,8 +300,19 @@ static struct platform_driver max77693_pmic_driver = {
184 .id_table = max77693_pmic_id, 300 .id_table = max77693_pmic_id,
185}; 301};
186 302
187module_platform_driver(max77693_pmic_driver); 303static int __init max77693_pmic_init(void)
304{
305 return platform_driver_register(&max77693_pmic_driver);
306}
307subsys_initcall(max77693_pmic_init);
308
309static void __exit max77693_pmic_cleanup(void)
310{
311 platform_driver_unregister(&max77693_pmic_driver);
312}
313module_exit(max77693_pmic_cleanup);
188 314
189MODULE_DESCRIPTION("MAXIM MAX77693 regulator driver"); 315MODULE_DESCRIPTION("MAXIM 77693/77843 regulator driver");
190MODULE_AUTHOR("Jonghwa Lee <jonghwa3.lee@samsung.com>"); 316MODULE_AUTHOR("Jonghwa Lee <jonghwa3.lee@samsung.com>");
317MODULE_AUTHOR("Krzysztof Kozlowski <k.kozlowski.k@gmail.com>");
191MODULE_LICENSE("GPL"); 318MODULE_LICENSE("GPL");
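
With the chg_reg_data table above, both chargers share one selector-to-current mapping, so the formula from the comments reduces to a single expression. A hedged helper sketch (name invented; it mirrors max77693_chg_get_current_limit() minus the register read):

/* current_uA = min_uA + uA_step * max(sel - min_sel, 0)
 * MAX77693 example: sel 0x10 -> 60 mA + 20 mA * (16 - 3) = 320 mA
 */
static unsigned int chg_sel_to_uA(const struct chg_reg_data *d,
                                  unsigned int chg_min_uA,
                                  unsigned int sel)
{
        sel = (sel <= d->min_sel) ? 0 : sel - d->min_sel;
        return chg_min_uA + d->uA_step * sel;
}
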
diff --git a/drivers/regulator/max77843.c b/drivers/regulator/max77843.c
deleted file mode 100644
index f4fd0d3cfa6e..000000000000
--- a/drivers/regulator/max77843.c
+++ /dev/null
@@ -1,201 +0,0 @@
1/*
2 * max77843.c - Regulator driver for the Maxim MAX77843
3 *
4 * Copyright (C) 2015 Samsung Electronics
5 * Author: Jaewon Kim <jaewon02.kim@samsung.com>
6 * Author: Beomho Seo <beomho.seo@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#include <linux/module.h>
15#include <linux/platform_device.h>
16#include <linux/regulator/driver.h>
17#include <linux/regulator/machine.h>
18#include <linux/mfd/max77843-private.h>
19#include <linux/regulator/of_regulator.h>
20
21enum max77843_regulator_type {
22 MAX77843_SAFEOUT1 = 0,
23 MAX77843_SAFEOUT2,
24 MAX77843_CHARGER,
25
26 MAX77843_NUM,
27};
28
29static const unsigned int max77843_safeout_voltage_table[] = {
30 4850000,
31 4900000,
32 4950000,
33 3300000,
34};
35
36static int max77843_reg_get_current_limit(struct regulator_dev *rdev)
37{
38 struct regmap *regmap = rdev->regmap;
39 unsigned int chg_min_uA = rdev->constraints->min_uA;
40 unsigned int chg_max_uA = rdev->constraints->max_uA;
41 unsigned int val;
42 int ret;
43 unsigned int reg, sel;
44
45 ret = regmap_read(regmap, MAX77843_CHG_REG_CHG_CNFG_02, &reg);
46 if (ret) {
47 dev_err(&rdev->dev, "Failed to read charger register\n");
48 return ret;
49 }
50
51 sel = reg & MAX77843_CHG_FAST_CHG_CURRENT_MASK;
52
53 if (sel < 0x03)
54 sel = 0;
55 else
56 sel -= 2;
57
58 val = chg_min_uA + MAX77843_CHG_FAST_CHG_CURRENT_STEP * sel;
59 if (val > chg_max_uA)
60 return -EINVAL;
61
62 return val;
63}
64
65static int max77843_reg_set_current_limit(struct regulator_dev *rdev,
66 int min_uA, int max_uA)
67{
68 struct regmap *regmap = rdev->regmap;
69 unsigned int chg_min_uA = rdev->constraints->min_uA;
70 int sel = 0;
71
72 while (chg_min_uA + MAX77843_CHG_FAST_CHG_CURRENT_STEP * sel < min_uA)
73 sel++;
74
75 if (chg_min_uA + MAX77843_CHG_FAST_CHG_CURRENT_STEP * sel > max_uA)
76 return -EINVAL;
77
78 sel += 2;
79
80 return regmap_write(regmap, MAX77843_CHG_REG_CHG_CNFG_02, sel);
81}
82
83static struct regulator_ops max77843_charger_ops = {
84 .is_enabled = regulator_is_enabled_regmap,
85 .enable = regulator_enable_regmap,
86 .disable = regulator_disable_regmap,
87 .get_current_limit = max77843_reg_get_current_limit,
88 .set_current_limit = max77843_reg_set_current_limit,
89};
90
91static struct regulator_ops max77843_regulator_ops = {
92 .is_enabled = regulator_is_enabled_regmap,
93 .enable = regulator_enable_regmap,
94 .disable = regulator_disable_regmap,
95 .list_voltage = regulator_list_voltage_table,
96 .get_voltage_sel = regulator_get_voltage_sel_regmap,
97 .set_voltage_sel = regulator_set_voltage_sel_regmap,
98};
99
100#define MAX77843_SAFEOUT(num) { \
101 .name = "SAFEOUT" # num, \
102 .id = MAX77843_SAFEOUT ## num, \
103 .ops = &max77843_regulator_ops, \
104 .of_match = of_match_ptr("SAFEOUT" # num), \
105 .regulators_node = of_match_ptr("regulators"), \
106 .type = REGULATOR_VOLTAGE, \
107 .owner = THIS_MODULE, \
108 .n_voltages = ARRAY_SIZE(max77843_safeout_voltage_table), \
109 .volt_table = max77843_safeout_voltage_table, \
110 .enable_reg = MAX77843_SYS_REG_SAFEOUTCTRL, \
111 .enable_mask = MAX77843_REG_SAFEOUTCTRL_ENSAFEOUT ## num, \
112 .vsel_reg = MAX77843_SYS_REG_SAFEOUTCTRL, \
113 .vsel_mask = MAX77843_REG_SAFEOUTCTRL_SAFEOUT ## num ## _MASK, \
114}
115
116static const struct regulator_desc max77843_supported_regulators[] = {
117 [MAX77843_SAFEOUT1] = MAX77843_SAFEOUT(1),
118 [MAX77843_SAFEOUT2] = MAX77843_SAFEOUT(2),
119 [MAX77843_CHARGER] = {
120 .name = "CHARGER",
121 .id = MAX77843_CHARGER,
122 .ops = &max77843_charger_ops,
123 .of_match = of_match_ptr("CHARGER"),
124 .regulators_node = of_match_ptr("regulators"),
125 .type = REGULATOR_CURRENT,
126 .owner = THIS_MODULE,
127 .enable_reg = MAX77843_CHG_REG_CHG_CNFG_00,
128 .enable_mask = MAX77843_CHG_MASK | MAX77843_CHG_BUCK_MASK,
129 .enable_val = MAX77843_CHG_MASK | MAX77843_CHG_BUCK_MASK,
130 },
131};
132
133static struct regmap *max77843_get_regmap(struct max77843 *max77843, int reg_id)
134{
135 switch (reg_id) {
136 case MAX77843_SAFEOUT1:
137 case MAX77843_SAFEOUT2:
138 return max77843->regmap;
139 case MAX77843_CHARGER:
140 return max77843->regmap_chg;
141 default:
142 return max77843->regmap;
143 }
144}
145
146static int max77843_regulator_probe(struct platform_device *pdev)
147{
148 struct max77843 *max77843 = dev_get_drvdata(pdev->dev.parent);
149 struct regulator_config config = {};
150 int i;
151
152 config.dev = max77843->dev;
153 config.driver_data = max77843;
154
155 for (i = 0; i < ARRAY_SIZE(max77843_supported_regulators); i++) {
156 struct regulator_dev *regulator;
157
158 config.regmap = max77843_get_regmap(max77843,
159 max77843_supported_regulators[i].id);
160
161 regulator = devm_regulator_register(&pdev->dev,
162 &max77843_supported_regulators[i], &config);
163 if (IS_ERR(regulator)) {
164 dev_err(&pdev->dev,
165 "Failed to regiser regulator-%d\n", i);
166 return PTR_ERR(regulator);
167 }
168 }
169
170 return 0;
171}
172
173static const struct platform_device_id max77843_regulator_id[] = {
174 { "max77843-regulator", },
175 { /* sentinel */ },
176};
177
178static struct platform_driver max77843_regulator_driver = {
179 .driver = {
180 .name = "max77843-regulator",
181 },
182 .probe = max77843_regulator_probe,
183 .id_table = max77843_regulator_id,
184};
185
186static int __init max77843_regulator_init(void)
187{
188 return platform_driver_register(&max77843_regulator_driver);
189}
190subsys_initcall(max77843_regulator_init);
191
192static void __exit max77843_regulator_exit(void)
193{
194 platform_driver_unregister(&max77843_regulator_driver);
195}
196module_exit(max77843_regulator_exit);
197
198MODULE_AUTHOR("Jaewon Kim <jaewon02.kim@samsung.com>");
199MODULE_AUTHOR("Beomho Seo <beomho.seo@samsung.com>");
200MODULE_DESCRIPTION("Maxim MAX77843 regulator driver");
201MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
index 4071d74fa828..b87f62dd484e 100644
--- a/drivers/regulator/max8660.c
+++ b/drivers/regulator/max8660.c
@@ -518,7 +518,6 @@ static struct i2c_driver max8660_driver = {
518 .probe = max8660_probe, 518 .probe = max8660_probe,
519 .driver = { 519 .driver = {
520 .name = "max8660", 520 .name = "max8660",
521 .owner = THIS_MODULE,
522 }, 521 },
523 .id_table = max8660_id, 522 .id_table = max8660_id,
524}; 523};
diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
index 6f2bdad8b4d8..5b75b7c2e3ea 100644
--- a/drivers/regulator/max8973-regulator.c
+++ b/drivers/regulator/max8973-regulator.c
@@ -75,6 +75,7 @@
75#define MAX8973_DISCH_ENBABLE BIT(5) 75#define MAX8973_DISCH_ENBABLE BIT(5)
76#define MAX8973_FT_ENABLE BIT(4) 76#define MAX8973_FT_ENABLE BIT(4)
77 77
78#define MAX8973_CKKADV_TRIP_MASK 0xC
78#define MAX8973_CKKADV_TRIP_DISABLE 0xC 79#define MAX8973_CKKADV_TRIP_DISABLE 0xC
79#define MAX8973_CKKADV_TRIP_75mV_PER_US 0x0 80#define MAX8973_CKKADV_TRIP_75mV_PER_US 0x0
80#define MAX8973_CKKADV_TRIP_150mV_PER_US 0x4 81#define MAX8973_CKKADV_TRIP_150mV_PER_US 0x4
@@ -282,6 +283,55 @@ static int max8973_set_ramp_delay(struct regulator_dev *rdev,
282 return ret; 283 return ret;
283} 284}
284 285
286static int max8973_set_current_limit(struct regulator_dev *rdev,
287 int min_ua, int max_ua)
288{
289 struct max8973_chip *max = rdev_get_drvdata(rdev);
290 unsigned int val;
291 int ret;
292
293 if (max_ua <= 9000000)
294 val = MAX8973_CKKADV_TRIP_75mV_PER_US;
295 else if (max_ua <= 12000000)
296 val = MAX8973_CKKADV_TRIP_150mV_PER_US;
297 else
298 val = MAX8973_CKKADV_TRIP_DISABLE;
299
300 ret = regmap_update_bits(max->regmap, MAX8973_CONTROL2,
301 MAX8973_CKKADV_TRIP_MASK, val);
302 if (ret < 0) {
303 dev_err(max->dev, "register %d update failed: %d\n",
304 MAX8973_CONTROL2, ret);
305 return ret;
306 }
307 return 0;
308}
309
310static int max8973_get_current_limit(struct regulator_dev *rdev)
311{
312 struct max8973_chip *max = rdev_get_drvdata(rdev);
313 unsigned int control2;
314 int ret;
315
316 ret = regmap_read(max->regmap, MAX8973_CONTROL2, &control2);
317 if (ret < 0) {
318 dev_err(max->dev, "register %d read failed: %d\n",
319 MAX8973_CONTROL2, ret);
320 return ret;
321 }
322 switch (control2 & MAX8973_CKKADV_TRIP_MASK) {
323 case MAX8973_CKKADV_TRIP_DISABLE:
324 return 15000000;
325 case MAX8973_CKKADV_TRIP_150mV_PER_US:
326 return 12000000;
327 case MAX8973_CKKADV_TRIP_75mV_PER_US:
328 return 9000000;
329 default:
330 break;
331 }
332 return 9000000;
333}
334
285static const struct regulator_ops max8973_dcdc_ops = { 335static const struct regulator_ops max8973_dcdc_ops = {
286 .get_voltage_sel = max8973_dcdc_get_voltage_sel, 336 .get_voltage_sel = max8973_dcdc_get_voltage_sel,
287 .set_voltage_sel = max8973_dcdc_set_voltage_sel, 337 .set_voltage_sel = max8973_dcdc_set_voltage_sel,
@@ -421,6 +471,8 @@ static struct max8973_regulator_platform_data *max8973_parse_dt(
421 struct device_node *np = dev->of_node; 471 struct device_node *np = dev->of_node;
422 int ret; 472 int ret;
423 u32 pval; 473 u32 pval;
474 bool etr_enable;
475 bool etr_sensitivity_high;
424 476
425 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); 477 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
426 if (!pdata) 478 if (!pdata)
@@ -450,7 +502,24 @@ static struct max8973_regulator_platform_data *max8973_parse_dt(
450 pdata->control_flags |= MAX8973_CONTROL_FREQ_SHIFT_9PER_ENABLE; 502 pdata->control_flags |= MAX8973_CONTROL_FREQ_SHIFT_9PER_ENABLE;
451 503
452 if (of_property_read_bool(np, "maxim,enable-bias-control")) 504 if (of_property_read_bool(np, "maxim,enable-bias-control"))
453 pdata->control_flags |= MAX8973_BIAS_ENABLE; 505 pdata->control_flags |= MAX8973_CONTROL_BIAS_ENABLE;
506
507 etr_enable = of_property_read_bool(np, "maxim,enable-etr");
508 etr_sensitivity_high = of_property_read_bool(np,
509 "maxim,enable-high-etr-sensitivity");
510 if (etr_sensitivity_high)
511 etr_enable = true;
512
513 if (etr_enable) {
514 if (etr_sensitivity_high)
515 pdata->control_flags |=
516 MAX8973_CONTROL_CLKADV_TRIP_75mV_PER_US;
517 else
518 pdata->control_flags |=
519 MAX8973_CONTROL_CLKADV_TRIP_150mV_PER_US;
520 } else {
521 pdata->control_flags |= MAX8973_CONTROL_CLKADV_TRIP_DISABLED;
522 }
454 523
455 return pdata; 524 return pdata;
456} 525}
@@ -568,6 +637,15 @@ static int max8973_probe(struct i2c_client *client,
568 max->lru_index[i] = i; 637 max->lru_index[i] = i;
569 max->lru_index[0] = max->curr_vout_reg; 638 max->lru_index[0] = max->curr_vout_reg;
570 max->lru_index[max->curr_vout_reg] = 0; 639 max->lru_index[max->curr_vout_reg] = 0;
640 } else {
641 /*
642 * If there is no DVS GPIO, the VOUT register
643 * address is fixed.
644 */
645 max->ops.set_voltage_sel = regulator_set_voltage_sel_regmap;
646 max->ops.get_voltage_sel = regulator_get_voltage_sel_regmap;
647 max->desc.vsel_reg = max->curr_vout_reg;
648 max->desc.vsel_mask = MAX8973_VOUT_MASK;
571 } 649 }
572 650
573 if (pdata_from_dt) 651 if (pdata_from_dt)
@@ -613,6 +691,8 @@ static int max8973_probe(struct i2c_client *client,
613 max->ops.enable = regulator_enable_regmap; 691 max->ops.enable = regulator_enable_regmap;
614 max->ops.disable = regulator_disable_regmap; 692 max->ops.disable = regulator_disable_regmap;
615 max->ops.is_enabled = regulator_is_enabled_regmap; 693 max->ops.is_enabled = regulator_is_enabled_regmap;
694 max->ops.set_current_limit = max8973_set_current_limit;
695 max->ops.get_current_limit = max8973_get_current_limit;
616 break; 696 break;
617 default: 697 default:
618 break; 698 break;
@@ -652,7 +732,6 @@ static struct i2c_driver max8973_i2c_driver = {
652 .driver = { 732 .driver = {
653 .name = "max8973", 733 .name = "max8973",
654 .of_match_table = of_max8973_match_tbl, 734 .of_match_table = of_max8973_match_tbl,
655 .owner = THIS_MODULE,
656 }, 735 },
657 .probe = max8973_probe, 736 .probe = max8973_probe,
658 .id_table = max8973_id, 737 .id_table = max8973_id,
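The current-limit callbacks added above are reached through the standard regulator consumer API. A minimal, hypothetical consumer sketch (the device, the "vdd" supply name and the limit values are illustrative, and the machine constraints must also permit REGULATOR_CHANGE_CURRENT for the call to succeed):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

static int example_limit_max8973(struct device *dev)
{
	/* "vdd" is an illustrative supply name, not one defined by this driver */
	struct regulator *reg = devm_regulator_get(dev, "vdd");
	int ret;

	if (IS_ERR(reg))
		return PTR_ERR(reg);

	/* max_uA <= 9000000 selects the 75 mV/us CKADV trip in max8973_set_current_limit() */
	ret = regulator_set_current_limit(reg, 0, 9000000);
	if (ret)
		return ret;

	/* Reads CONTROL2 back; returns 9000000 for the 75 mV/us trip setting */
	return regulator_get_current_limit(reg);
}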
diff --git a/drivers/regulator/mt6311-regulator.c b/drivers/regulator/mt6311-regulator.c
new file mode 100644
index 000000000000..02c4e5feca8e
--- /dev/null
+++ b/drivers/regulator/mt6311-regulator.c
@@ -0,0 +1,179 @@
1/*
2 * Copyright (c) 2015 MediaTek Inc.
3 * Author: Henry Chen <henryc.chen@mediatek.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#include <linux/err.h>
16#include <linux/gpio.h>
17#include <linux/i2c.h>
18#include <linux/init.h>
19#include <linux/interrupt.h>
20#include <linux/module.h>
21#include <linux/regmap.h>
22#include <linux/regulator/driver.h>
23#include <linux/regulator/machine.h>
24#include <linux/regulator/of_regulator.h>
25#include <linux/regulator/mt6311.h>
26#include <linux/slab.h>
27#include "mt6311-regulator.h"
28
29static const struct regmap_config mt6311_regmap_config = {
30 .reg_bits = 8,
31 .val_bits = 8,
32 .max_register = MT6311_FQMTR_CON4,
33};
34
35/* Default voltage limits measured in microvolts */
36#define MT6311_MIN_UV 600000
37#define MT6311_MAX_UV 1393750
38#define MT6311_STEP_UV 6250
39
40static const struct regulator_linear_range buck_volt_range[] = {
41 REGULATOR_LINEAR_RANGE(MT6311_MIN_UV, 0, 0x7f, MT6311_STEP_UV),
42};
43
44static const struct regulator_ops mt6311_buck_ops = {
45 .list_voltage = regulator_list_voltage_linear_range,
46 .map_voltage = regulator_map_voltage_linear_range,
47 .set_voltage_sel = regulator_set_voltage_sel_regmap,
48 .get_voltage_sel = regulator_get_voltage_sel_regmap,
49 .set_voltage_time_sel = regulator_set_voltage_time_sel,
50 .enable = regulator_enable_regmap,
51 .disable = regulator_disable_regmap,
52 .is_enabled = regulator_is_enabled_regmap,
53};
54
55static const struct regulator_ops mt6311_ldo_ops = {
56 .enable = regulator_enable_regmap,
57 .disable = regulator_disable_regmap,
58 .is_enabled = regulator_is_enabled_regmap,
59};
60
61#define MT6311_BUCK(_id) \
62{\
63 .name = #_id,\
64 .ops = &mt6311_buck_ops,\
65 .of_match = of_match_ptr(#_id),\
66 .regulators_node = of_match_ptr("regulators"),\
67 .type = REGULATOR_VOLTAGE,\
68 .id = MT6311_ID_##_id,\
69 .n_voltages = (MT6311_MAX_UV - MT6311_MIN_UV) / MT6311_STEP_UV + 1,\
70 .min_uV = MT6311_MIN_UV,\
71 .uV_step = MT6311_STEP_UV,\
72 .owner = THIS_MODULE,\
73 .linear_ranges = buck_volt_range, \
74 .n_linear_ranges = ARRAY_SIZE(buck_volt_range), \
75 .enable_reg = MT6311_VDVFS11_CON9,\
76 .enable_mask = MT6311_PMIC_VDVFS11_EN_MASK,\
77 .vsel_reg = MT6311_VDVFS11_CON12,\
78 .vsel_mask = MT6311_PMIC_VDVFS11_VOSEL_MASK,\
79}
80
81#define MT6311_LDO(_id) \
82{\
83 .name = #_id,\
84 .ops = &mt6311_ldo_ops,\
85 .of_match = of_match_ptr(#_id),\
86 .regulators_node = of_match_ptr("regulators"),\
87 .type = REGULATOR_VOLTAGE,\
88 .id = MT6311_ID_##_id,\
89 .owner = THIS_MODULE,\
90 .enable_reg = MT6311_LDO_CON3,\
91 .enable_mask = MT6311_PMIC_RG_VBIASN_EN_MASK,\
92}
93
94static const struct regulator_desc mt6311_regulators[] = {
95 MT6311_BUCK(VDVFS),
96 MT6311_LDO(VBIASN),
97};
98
99/*
100 * I2C driver interface functions
101 */
102static int mt6311_i2c_probe(struct i2c_client *i2c,
103 const struct i2c_device_id *id)
104{
105 struct regulator_config config = { };
106 struct regulator_dev *rdev;
107 struct regmap *regmap;
108 int i, ret;
109 unsigned int data;
110
111 regmap = devm_regmap_init_i2c(i2c, &mt6311_regmap_config);
112 if (IS_ERR(regmap)) {
113 ret = PTR_ERR(regmap);
114 dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
115 ret);
116 return ret;
117 }
118
119 ret = regmap_read(regmap, MT6311_SWCID, &data);
120 if (ret < 0) {
121 dev_err(&i2c->dev, "Failed to read DEVICE_ID reg: %d\n", ret);
122 return ret;
123 }
124
125 switch (data) {
126 case MT6311_E1_CID_CODE:
127 case MT6311_E2_CID_CODE:
128 case MT6311_E3_CID_CODE:
129 break;
130 default:
131 dev_err(&i2c->dev, "Unsupported device id = 0x%x.\n", data);
132 return -ENODEV;
133 }
134
135 for (i = 0; i < MT6311_MAX_REGULATORS; i++) {
136 config.dev = &i2c->dev;
137 config.regmap = regmap;
138
139 rdev = devm_regulator_register(&i2c->dev,
140 &mt6311_regulators[i], &config);
141 if (IS_ERR(rdev)) {
142 dev_err(&i2c->dev,
143 "Failed to register MT6311 regulator\n");
144 return PTR_ERR(rdev);
145 }
146 }
147
148 return 0;
149}
150
151static const struct i2c_device_id mt6311_i2c_id[] = {
152 {"mt6311", 0},
153 {},
154};
155MODULE_DEVICE_TABLE(i2c, mt6311_i2c_id);
156
157#ifdef CONFIG_OF
158static const struct of_device_id mt6311_dt_ids[] = {
159 { .compatible = "mediatek,mt6311-regulator",
160 .data = &mt6311_i2c_id[0] },
161 {},
162};
163MODULE_DEVICE_TABLE(of, mt6311_dt_ids);
164#endif
165
166static struct i2c_driver mt6311_regulator_driver = {
167 .driver = {
168 .name = "mt6311",
169 .of_match_table = of_match_ptr(mt6311_dt_ids),
170 },
171 .probe = mt6311_i2c_probe,
172 .id_table = mt6311_i2c_id,
173};
174
175module_i2c_driver(mt6311_regulator_driver);
176
177MODULE_AUTHOR("Henry Chen <henryc.chen@mediatek.com>");
178MODULE_DESCRIPTION("Regulator device driver for Mediatek MT6311");
179MODULE_LICENSE("GPL v2");
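As a quick sanity check on the buck range above: (1393750 - 600000) / 6250 + 1 = 128 voltages, which matches the 0..0x7f selector range implied by MT6311_PMIC_VDVFS11_VOSEL_MASK. A hypothetical helper showing the linear mapping (not part of the driver; the core's regulator_list_voltage_linear_range() does this for real):

/* Hypothetical illustration of the VDVFS buck selector-to-voltage mapping */
static int mt6311_buck_sel_to_uV(unsigned int sel)
{
	/* sel 0 -> 600000 uV, sel 0x7f -> 600000 + 127 * 6250 = 1393750 uV */
	return 600000 + sel * 6250;
}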
diff --git a/drivers/regulator/mt6311-regulator.h b/drivers/regulator/mt6311-regulator.h
new file mode 100644
index 000000000000..5218db46a798
--- /dev/null
+++ b/drivers/regulator/mt6311-regulator.h
@@ -0,0 +1,65 @@
1/*
2 * Copyright (c) 2015 MediaTek Inc.
3 * Author: Henry Chen <henryc.chen@mediatek.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#ifndef __MT6311_REGULATOR_H__
16#define __MT6311_REGULATOR_H__
17
18#define MT6311_SWCID 0x01
19
20#define MT6311_TOP_INT_CON 0x18
21#define MT6311_TOP_INT_MON 0x19
22
23#define MT6311_VDVFS11_CON0 0x87
24#define MT6311_VDVFS11_CON7 0x88
25#define MT6311_VDVFS11_CON8 0x89
26#define MT6311_VDVFS11_CON9 0x8A
27#define MT6311_VDVFS11_CON10 0x8B
28#define MT6311_VDVFS11_CON11 0x8C
29#define MT6311_VDVFS11_CON12 0x8D
30#define MT6311_VDVFS11_CON13 0x8E
31#define MT6311_VDVFS11_CON14 0x8F
32#define MT6311_VDVFS11_CON15 0x90
33#define MT6311_VDVFS11_CON16 0x91
34#define MT6311_VDVFS11_CON17 0x92
35#define MT6311_VDVFS11_CON18 0x93
36#define MT6311_VDVFS11_CON19 0x94
37
38#define MT6311_LDO_CON0 0xCC
39#define MT6311_LDO_OCFB0 0xCD
40#define MT6311_LDO_CON2 0xCE
41#define MT6311_LDO_CON3 0xCF
42#define MT6311_LDO_CON4 0xD0
43#define MT6311_FQMTR_CON0 0xD1
44#define MT6311_FQMTR_CON1 0xD2
45#define MT6311_FQMTR_CON2 0xD3
46#define MT6311_FQMTR_CON3 0xD4
47#define MT6311_FQMTR_CON4 0xD5
48
49#define MT6311_PMIC_RG_INT_POL_MASK 0x1
50#define MT6311_PMIC_RG_INT_EN_MASK 0x2
51#define MT6311_PMIC_RG_BUCK_OC_INT_STATUS_MASK 0x10
52
53#define MT6311_PMIC_VDVFS11_EN_CTRL_MASK 0x1
54#define MT6311_PMIC_VDVFS11_VOSEL_CTRL_MASK 0x2
55#define MT6311_PMIC_VDVFS11_EN_SEL_MASK 0x3
56#define MT6311_PMIC_VDVFS11_VOSEL_SEL_MASK 0xc
57#define MT6311_PMIC_VDVFS11_EN_MASK 0x1
58#define MT6311_PMIC_VDVFS11_VOSEL_MASK 0x7F
59#define MT6311_PMIC_VDVFS11_VOSEL_ON_MASK 0x7F
60#define MT6311_PMIC_VDVFS11_VOSEL_SLEEP_MASK 0x7F
61#define MT6311_PMIC_NI_VDVFS11_VOSEL_MASK 0x7F
62
63#define MT6311_PMIC_RG_VBIASN_EN_MASK 0x1
64
65#endif
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index b1c485b24ab2..250700c853bf 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -107,6 +107,9 @@ static void of_get_regulation_constraints(struct device_node *np,
107 if (!of_property_read_u32(np, "regulator-system-load", &pval)) 107 if (!of_property_read_u32(np, "regulator-system-load", &pval))
108 constraints->system_load = pval; 108 constraints->system_load = pval;
109 109
110 constraints->over_current_protection = of_property_read_bool(np,
111 "regulator-over-current-protection");
112
110 for (i = 0; i < ARRAY_SIZE(regulator_states); i++) { 113 for (i = 0; i < ARRAY_SIZE(regulator_states); i++) {
111 switch (i) { 114 switch (i) {
112 case PM_SUSPEND_MEM: 115 case PM_SUSPEND_MEM:
diff --git a/drivers/regulator/pbias-regulator.c b/drivers/regulator/pbias-regulator.c
index bd2b75c0d1d1..4fa7bcaf454e 100644
--- a/drivers/regulator/pbias-regulator.c
+++ b/drivers/regulator/pbias-regulator.c
@@ -30,6 +30,7 @@
30struct pbias_reg_info { 30struct pbias_reg_info {
31 u32 enable; 31 u32 enable;
32 u32 enable_mask; 32 u32 enable_mask;
33 u32 disable_val;
33 u32 vmode; 34 u32 vmode;
34 unsigned int enable_time; 35 unsigned int enable_time;
35 char *name; 36 char *name;
@@ -62,6 +63,7 @@ static const struct pbias_reg_info pbias_mmc_omap2430 = {
62 .enable = BIT(1), 63 .enable = BIT(1),
63 .enable_mask = BIT(1), 64 .enable_mask = BIT(1),
64 .vmode = BIT(0), 65 .vmode = BIT(0),
66 .disable_val = 0,
65 .enable_time = 100, 67 .enable_time = 100,
66 .name = "pbias_mmc_omap2430" 68 .name = "pbias_mmc_omap2430"
67}; 69};
@@ -77,6 +79,7 @@ static const struct pbias_reg_info pbias_sim_omap3 = {
77static const struct pbias_reg_info pbias_mmc_omap4 = { 79static const struct pbias_reg_info pbias_mmc_omap4 = {
78 .enable = BIT(26) | BIT(22), 80 .enable = BIT(26) | BIT(22),
79 .enable_mask = BIT(26) | BIT(25) | BIT(22), 81 .enable_mask = BIT(26) | BIT(25) | BIT(22),
82 .disable_val = BIT(25),
80 .vmode = BIT(21), 83 .vmode = BIT(21),
81 .enable_time = 100, 84 .enable_time = 100,
82 .name = "pbias_mmc_omap4" 85 .name = "pbias_mmc_omap4"
@@ -85,6 +88,7 @@ static const struct pbias_reg_info pbias_mmc_omap4 = {
85static const struct pbias_reg_info pbias_mmc_omap5 = { 88static const struct pbias_reg_info pbias_mmc_omap5 = {
86 .enable = BIT(27) | BIT(26), 89 .enable = BIT(27) | BIT(26),
87 .enable_mask = BIT(27) | BIT(25) | BIT(26), 90 .enable_mask = BIT(27) | BIT(25) | BIT(26),
91 .disable_val = BIT(25),
88 .vmode = BIT(21), 92 .vmode = BIT(21),
89 .enable_time = 100, 93 .enable_time = 100,
90 .name = "pbias_mmc_omap5" 94 .name = "pbias_mmc_omap5"
@@ -159,6 +163,7 @@ static int pbias_regulator_probe(struct platform_device *pdev)
159 drvdata[data_idx].desc.enable_reg = res->start; 163 drvdata[data_idx].desc.enable_reg = res->start;
160 drvdata[data_idx].desc.enable_mask = info->enable_mask; 164 drvdata[data_idx].desc.enable_mask = info->enable_mask;
161 drvdata[data_idx].desc.enable_val = info->enable; 165 drvdata[data_idx].desc.enable_val = info->enable;
166 drvdata[data_idx].desc.disable_val = info->disable_val;
162 167
163 cfg.init_data = pbias_matches[idx].init_data; 168 cfg.init_data = pbias_matches[idx].init_data;
164 cfg.driver_data = &drvdata[data_idx]; 169 cfg.driver_data = &drvdata[data_idx];
diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c
index 8cc8d1877c44..2a44e5dd9c2a 100644
--- a/drivers/regulator/pfuze100-regulator.c
+++ b/drivers/regulator/pfuze100-regulator.c
@@ -643,7 +643,6 @@ static struct i2c_driver pfuze_driver = {
643 .id_table = pfuze_device_id, 643 .id_table = pfuze_device_id,
644 .driver = { 644 .driver = {
645 .name = "pfuze100-regulator", 645 .name = "pfuze100-regulator",
646 .owner = THIS_MODULE,
647 .of_match_table = pfuze_dt_ids, 646 .of_match_table = pfuze_dt_ids,
648 }, 647 },
649 .probe = pfuze100_regulator_probe, 648 .probe = pfuze100_regulator_probe,
@@ -653,4 +652,3 @@ module_i2c_driver(pfuze_driver);
653MODULE_AUTHOR("Robin Gong <b38343@freescale.com>"); 652MODULE_AUTHOR("Robin Gong <b38343@freescale.com>");
654MODULE_DESCRIPTION("Regulator Driver for Freescale PFUZE100/PFUZE200 PMIC"); 653MODULE_DESCRIPTION("Regulator Driver for Freescale PFUZE100/PFUZE200 PMIC");
655MODULE_LICENSE("GPL v2"); 654MODULE_LICENSE("GPL v2");
656MODULE_ALIAS("i2c:pfuze100-regulator");
diff --git a/drivers/regulator/pwm-regulator.c b/drivers/regulator/pwm-regulator.c
index ffa96124a5e7..fc3166dfcbfa 100644
--- a/drivers/regulator/pwm-regulator.c
+++ b/drivers/regulator/pwm-regulator.c
@@ -10,6 +10,7 @@
10 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
11 */ 11 */
12 12
13#include <linux/delay.h>
13#include <linux/module.h> 14#include <linux/module.h>
14#include <linux/init.h> 15#include <linux/init.h>
15#include <linux/err.h> 16#include <linux/err.h>
@@ -21,9 +22,15 @@
21#include <linux/pwm.h> 22#include <linux/pwm.h>
22 23
23struct pwm_regulator_data { 24struct pwm_regulator_data {
24 struct pwm_voltages *duty_cycle_table; 25 /* Shared */
25 struct pwm_device *pwm; 26 struct pwm_device *pwm;
27
28 /* Voltage table */
29 struct pwm_voltages *duty_cycle_table;
26 int state; 30 int state;
31
32 /* Continuous voltage */
33 int volt_uV;
27}; 34};
28 35
29struct pwm_voltages { 36struct pwm_voltages {
@@ -31,6 +38,9 @@ struct pwm_voltages {
31 unsigned int dutycycle; 38 unsigned int dutycycle;
32}; 39};
33 40
41/**
42 * Voltage table call-backs
43 */
34static int pwm_regulator_get_voltage_sel(struct regulator_dev *rdev) 44static int pwm_regulator_get_voltage_sel(struct regulator_dev *rdev)
35{ 45{
36 struct pwm_regulator_data *drvdata = rdev_get_drvdata(rdev); 46 struct pwm_regulator_data *drvdata = rdev_get_drvdata(rdev);
@@ -79,29 +89,129 @@ static int pwm_regulator_list_voltage(struct regulator_dev *rdev,
79 return drvdata->duty_cycle_table[selector].uV; 89 return drvdata->duty_cycle_table[selector].uV;
80} 90}
81 91
82static struct regulator_ops pwm_regulator_voltage_ops = { 92/**
93 * Continuous voltage call-backs
94 */
95static int pwm_voltage_to_duty_cycle_percentage(struct regulator_dev *rdev, int req_uV)
96{
97 int min_uV = rdev->constraints->min_uV;
98 int max_uV = rdev->constraints->max_uV;
99 int diff = max_uV - min_uV;
100
101 return 100 - (((req_uV * 100) - (min_uV * 100)) / diff);
102}
103
104static int pwm_regulator_get_voltage(struct regulator_dev *rdev)
105{
106 struct pwm_regulator_data *drvdata = rdev_get_drvdata(rdev);
107
108 return drvdata->volt_uV;
109}
110
111static int pwm_regulator_set_voltage(struct regulator_dev *rdev,
112 int min_uV, int max_uV,
113 unsigned *selector)
114{
115 struct pwm_regulator_data *drvdata = rdev_get_drvdata(rdev);
116 unsigned int ramp_delay = rdev->constraints->ramp_delay;
117 unsigned int period = pwm_get_period(drvdata->pwm);
118 int duty_cycle;
119 int ret;
120
121 duty_cycle = pwm_voltage_to_duty_cycle_percentage(rdev, min_uV);
122
123 ret = pwm_config(drvdata->pwm, (period / 100) * duty_cycle, period);
124 if (ret) {
125 dev_err(&rdev->dev, "Failed to configure PWM\n");
126 return ret;
127 }
128
129 ret = pwm_enable(drvdata->pwm);
130 if (ret) {
131 dev_err(&rdev->dev, "Failed to enable PWM\n");
132 return ret;
133 }
134 drvdata->volt_uV = min_uV;
135
136 /* Delay required by PWM regulator to settle to the new voltage */
137 usleep_range(ramp_delay, ramp_delay + 1000);
138
139 return 0;
140}
141
142static struct regulator_ops pwm_regulator_voltage_table_ops = {
83 .set_voltage_sel = pwm_regulator_set_voltage_sel, 143 .set_voltage_sel = pwm_regulator_set_voltage_sel,
84 .get_voltage_sel = pwm_regulator_get_voltage_sel, 144 .get_voltage_sel = pwm_regulator_get_voltage_sel,
85 .list_voltage = pwm_regulator_list_voltage, 145 .list_voltage = pwm_regulator_list_voltage,
86 .map_voltage = regulator_map_voltage_iterate, 146 .map_voltage = regulator_map_voltage_iterate,
87}; 147};
88 148
149static struct regulator_ops pwm_regulator_voltage_continuous_ops = {
150 .get_voltage = pwm_regulator_get_voltage,
151 .set_voltage = pwm_regulator_set_voltage,
152};
153
89static struct regulator_desc pwm_regulator_desc = { 154static struct regulator_desc pwm_regulator_desc = {
90 .name = "pwm-regulator", 155 .name = "pwm-regulator",
91 .ops = &pwm_regulator_voltage_ops,
92 .type = REGULATOR_VOLTAGE, 156 .type = REGULATOR_VOLTAGE,
93 .owner = THIS_MODULE, 157 .owner = THIS_MODULE,
94 .supply_name = "pwm", 158 .supply_name = "pwm",
95}; 159};
96 160
161static int pwm_regulator_init_table(struct platform_device *pdev,
162 struct pwm_regulator_data *drvdata)
163{
164 struct device_node *np = pdev->dev.of_node;
165 struct pwm_voltages *duty_cycle_table;
166 unsigned int length = 0;
167 int ret;
168
169 of_find_property(np, "voltage-table", &length);
170
171 if ((length < sizeof(*duty_cycle_table)) ||
172 (length % sizeof(*duty_cycle_table))) {
173 dev_err(&pdev->dev,
174 "voltage-table length(%d) is invalid\n",
175 length);
176 return -EINVAL;
177 }
178
179 duty_cycle_table = devm_kzalloc(&pdev->dev, length, GFP_KERNEL);
180 if (!duty_cycle_table)
181 return -ENOMEM;
182
183 ret = of_property_read_u32_array(np, "voltage-table",
184 (u32 *)duty_cycle_table,
185 length / sizeof(u32));
186 if (ret) {
187 dev_err(&pdev->dev, "Failed to read voltage-table\n");
188 return ret;
189 }
190
191 drvdata->duty_cycle_table = duty_cycle_table;
192 pwm_regulator_desc.ops = &pwm_regulator_voltage_table_ops;
193 pwm_regulator_desc.n_voltages = length / sizeof(*duty_cycle_table);
194
195 return 0;
196}
197
198static int pwm_regulator_init_continuous(struct platform_device *pdev,
199 struct pwm_regulator_data *drvdata)
200{
201 pwm_regulator_desc.ops = &pwm_regulator_voltage_continuous_ops;
202 pwm_regulator_desc.continuous_voltage_range = true;
203
204 return 0;
205}
206
97static int pwm_regulator_probe(struct platform_device *pdev) 207static int pwm_regulator_probe(struct platform_device *pdev)
98{ 208{
209 const struct regulator_init_data *init_data;
99 struct pwm_regulator_data *drvdata; 210 struct pwm_regulator_data *drvdata;
100 struct property *prop;
101 struct regulator_dev *regulator; 211 struct regulator_dev *regulator;
102 struct regulator_config config = { }; 212 struct regulator_config config = { };
103 struct device_node *np = pdev->dev.of_node; 213 struct device_node *np = pdev->dev.of_node;
104 int length, ret; 214 int ret;
105 215
106 if (!np) { 216 if (!np) {
107 dev_err(&pdev->dev, "Device Tree node missing\n"); 217 dev_err(&pdev->dev, "Device Tree node missing\n");
@@ -112,44 +222,22 @@ static int pwm_regulator_probe(struct platform_device *pdev)
112 if (!drvdata) 222 if (!drvdata)
113 return -ENOMEM; 223 return -ENOMEM;
114 224
115 /* determine the number of voltage-table */ 225 if (of_find_property(np, "voltage-table", NULL))
116 prop = of_find_property(np, "voltage-table", &length); 226 ret = pwm_regulator_init_table(pdev, drvdata);
117 if (!prop) { 227 else
118 dev_err(&pdev->dev, "No voltage-table\n"); 228 ret = pwm_regulator_init_continuous(pdev, drvdata);
119 return -EINVAL; 229 if (ret)
120 }
121
122 if ((length < sizeof(*drvdata->duty_cycle_table)) ||
123 (length % sizeof(*drvdata->duty_cycle_table))) {
124 dev_err(&pdev->dev, "voltage-table length(%d) is invalid\n",
125 length);
126 return -EINVAL;
127 }
128
129 pwm_regulator_desc.n_voltages = length / sizeof(*drvdata->duty_cycle_table);
130
131 drvdata->duty_cycle_table = devm_kzalloc(&pdev->dev,
132 length, GFP_KERNEL);
133 if (!drvdata->duty_cycle_table)
134 return -ENOMEM;
135
136 /* read voltage table from DT property */
137 ret = of_property_read_u32_array(np, "voltage-table",
138 (u32 *)drvdata->duty_cycle_table,
139 length / sizeof(u32));
140 if (ret < 0) {
141 dev_err(&pdev->dev, "read voltage-table failed\n");
142 return ret; 230 return ret;
143 }
144 231
145 config.init_data = of_get_regulator_init_data(&pdev->dev, np, 232 init_data = of_get_regulator_init_data(&pdev->dev, np,
146 &pwm_regulator_desc); 233 &pwm_regulator_desc);
147 if (!config.init_data) 234 if (!init_data)
148 return -ENOMEM; 235 return -ENOMEM;
149 236
150 config.of_node = np; 237 config.of_node = np;
151 config.dev = &pdev->dev; 238 config.dev = &pdev->dev;
152 config.driver_data = drvdata; 239 config.driver_data = drvdata;
240 config.init_data = init_data;
153 241
154 drvdata->pwm = devm_pwm_get(&pdev->dev, NULL); 242 drvdata->pwm = devm_pwm_get(&pdev->dev, NULL);
155 if (IS_ERR(drvdata->pwm)) { 243 if (IS_ERR(drvdata->pwm)) {
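A worked example of the continuous-voltage duty-cycle mapping introduced above, using illustrative constraints of 1.0 V to 1.5 V: requesting 1.25 V gives 100 - ((125000000 - 100000000) / 500000) = 50, i.e. a 50% duty cycle, and pwm_config() is then called with (period / 100) * 50. Note the mapping is inverted: min_uV corresponds to 100% duty and max_uV to 0%. A stand-alone sketch of the same arithmetic:

/* Illustrative only; mirrors pwm_voltage_to_duty_cycle_percentage() */
static int pwm_duty_example(void)
{
	int min_uV = 1000000, max_uV = 1500000, req_uV = 1250000;
	int diff = max_uV - min_uV;

	return 100 - (((req_uV * 100) - (min_uV * 100)) / diff); /* = 50 */
}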
diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c
new file mode 100644
index 000000000000..9c6167dd2c8b
--- /dev/null
+++ b/drivers/regulator/qcom_smd-regulator.c
@@ -0,0 +1,350 @@
1/*
2 * Copyright (c) 2015, Sony Mobile Communications AB.
3 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 and
7 * only version 2 as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#include <linux/module.h>
16#include <linux/of.h>
17#include <linux/of_device.h>
18#include <linux/platform_device.h>
19#include <linux/regulator/driver.h>
20#include <linux/regulator/machine.h>
21#include <linux/regulator/of_regulator.h>
22#include <linux/soc/qcom/smd-rpm.h>
23
24struct qcom_rpm_reg {
25 struct device *dev;
26
27 struct qcom_smd_rpm *rpm;
28
29 u32 type;
30 u32 id;
31
32 struct regulator_desc desc;
33
34 int is_enabled;
35 int uV;
36};
37
38struct rpm_regulator_req {
39 u32 key;
40 u32 nbytes;
41 u32 value;
42};
43
44#define RPM_KEY_SWEN 0x6e657773 /* "swen" */
45#define RPM_KEY_UV 0x00007675 /* "uv" */
46#define RPM_KEY_MA 0x0000616d /* "ma" */
47
48static int rpm_reg_write_active(struct qcom_rpm_reg *vreg,
49 struct rpm_regulator_req *req,
50 size_t size)
51{
52 return qcom_rpm_smd_write(vreg->rpm,
53 QCOM_SMD_RPM_ACTIVE_STATE,
54 vreg->type,
55 vreg->id,
56 req, size);
57}
58
59static int rpm_reg_enable(struct regulator_dev *rdev)
60{
61 struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev);
62 struct rpm_regulator_req req;
63 int ret;
64
65 req.key = RPM_KEY_SWEN;
66 req.nbytes = sizeof(u32);
67 req.value = 1;
68
69 ret = rpm_reg_write_active(vreg, &req, sizeof(req));
70 if (!ret)
71 vreg->is_enabled = 1;
72
73 return ret;
74}
75
76static int rpm_reg_is_enabled(struct regulator_dev *rdev)
77{
78 struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev);
79
80 return vreg->is_enabled;
81}
82
83static int rpm_reg_disable(struct regulator_dev *rdev)
84{
85 struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev);
86 struct rpm_regulator_req req;
87 int ret;
88
89 req.key = RPM_KEY_SWEN;
90 req.nbytes = sizeof(u32);
91 req.value = 0;
92
93 ret = rpm_reg_write_active(vreg, &req, sizeof(req));
94 if (!ret)
95 vreg->is_enabled = 0;
96
97 return ret;
98}
99
100static int rpm_reg_get_voltage(struct regulator_dev *rdev)
101{
102 struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev);
103
104 return vreg->uV;
105}
106
107static int rpm_reg_set_voltage(struct regulator_dev *rdev,
108 int min_uV,
109 int max_uV,
110 unsigned *selector)
111{
112 struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev);
113 struct rpm_regulator_req req;
114 int ret = 0;
115
116 req.key = RPM_KEY_UV;
117 req.nbytes = sizeof(u32);
118 req.value = min_uV;
119
120 ret = rpm_reg_write_active(vreg, &req, sizeof(req));
121 if (!ret)
122 vreg->uV = min_uV;
123
124 return ret;
125}
126
127static int rpm_reg_set_load(struct regulator_dev *rdev, int load_uA)
128{
129 struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev);
130 struct rpm_regulator_req req;
131
132 req.key = RPM_KEY_MA;
133 req.nbytes = sizeof(u32);
134 req.value = load_uA;
135
136 return rpm_reg_write_active(vreg, &req, sizeof(req));
137}
138
139static const struct regulator_ops rpm_smps_ldo_ops = {
140 .enable = rpm_reg_enable,
141 .disable = rpm_reg_disable,
142 .is_enabled = rpm_reg_is_enabled,
143
144 .get_voltage = rpm_reg_get_voltage,
145 .set_voltage = rpm_reg_set_voltage,
146
147 .set_load = rpm_reg_set_load,
148};
149
150static const struct regulator_ops rpm_switch_ops = {
151 .enable = rpm_reg_enable,
152 .disable = rpm_reg_disable,
153 .is_enabled = rpm_reg_is_enabled,
154};
155
156static const struct regulator_desc pm8x41_hfsmps = {
157 .linear_ranges = (struct regulator_linear_range[]) {
158 REGULATOR_LINEAR_RANGE( 375000, 0, 95, 12500),
159 REGULATOR_LINEAR_RANGE(1550000, 96, 158, 25000),
160 },
161 .n_linear_ranges = 2,
162 .n_voltages = 159,
163 .ops = &rpm_smps_ldo_ops,
164};
165
166static const struct regulator_desc pm8841_ftsmps = {
167 .linear_ranges = (struct regulator_linear_range[]) {
168 REGULATOR_LINEAR_RANGE(350000, 0, 184, 5000),
169 REGULATOR_LINEAR_RANGE(700000, 185, 339, 10000),
170 },
171 .n_linear_ranges = 2,
172 .n_voltages = 340,
173 .ops = &rpm_smps_ldo_ops,
174};
175
176static const struct regulator_desc pm8941_boost = {
177 .linear_ranges = (struct regulator_linear_range[]) {
178 REGULATOR_LINEAR_RANGE(4000000, 0, 15, 100000),
179 },
180 .n_linear_ranges = 1,
181 .n_voltages = 16,
182 .ops = &rpm_smps_ldo_ops,
183};
184
185static const struct regulator_desc pm8941_pldo = {
186 .linear_ranges = (struct regulator_linear_range[]) {
187 REGULATOR_LINEAR_RANGE( 750000, 0, 30, 25000),
188 REGULATOR_LINEAR_RANGE(1500000, 31, 99, 50000),
189 },
190 .n_linear_ranges = 2,
191 .n_voltages = 100,
192 .ops = &rpm_smps_ldo_ops,
193};
194
195static const struct regulator_desc pm8941_nldo = {
196 .linear_ranges = (struct regulator_linear_range[]) {
197 REGULATOR_LINEAR_RANGE(750000, 0, 63, 12500),
198 },
199 .n_linear_ranges = 1,
200 .n_voltages = 64,
201 .ops = &rpm_smps_ldo_ops,
202};
203
204static const struct regulator_desc pm8941_lnldo = {
205 .fixed_uV = 1740000,
206 .n_voltages = 1,
207 .ops = &rpm_smps_ldo_ops,
208};
209
210static const struct regulator_desc pm8941_switch = {
211 .ops = &rpm_switch_ops,
212};
213
214struct rpm_regulator_data {
215 const char *name;
216 u32 type;
217 u32 id;
218 const struct regulator_desc *desc;
219 const char *supply;
220};
221
222static const struct rpm_regulator_data rpm_pm8841_regulators[] = {
223 { "s1", QCOM_SMD_RPM_SMPB, 1, &pm8x41_hfsmps, "vdd_s1" },
224 { "s2", QCOM_SMD_RPM_SMPB, 2, &pm8841_ftsmps, "vdd_s2" },
225 { "s3", QCOM_SMD_RPM_SMPB, 3, &pm8x41_hfsmps, "vdd_s3" },
226 { "s4", QCOM_SMD_RPM_SMPB, 4, &pm8841_ftsmps, "vdd_s4" },
227 { "s5", QCOM_SMD_RPM_SMPB, 5, &pm8841_ftsmps, "vdd_s5" },
228 { "s6", QCOM_SMD_RPM_SMPB, 6, &pm8841_ftsmps, "vdd_s6" },
229 { "s7", QCOM_SMD_RPM_SMPB, 7, &pm8841_ftsmps, "vdd_s7" },
230 { "s8", QCOM_SMD_RPM_SMPB, 8, &pm8841_ftsmps, "vdd_s8" },
231 {}
232};
233
234static const struct rpm_regulator_data rpm_pm8941_regulators[] = {
235 { "s1", QCOM_SMD_RPM_SMPA, 1, &pm8x41_hfsmps, "vdd_s1" },
236 { "s2", QCOM_SMD_RPM_SMPA, 2, &pm8x41_hfsmps, "vdd_s2" },
237 { "s3", QCOM_SMD_RPM_SMPA, 3, &pm8x41_hfsmps, "vdd_s3" },
238 { "s4", QCOM_SMD_RPM_BOOST, 1, &pm8941_boost },
239
240 { "l1", QCOM_SMD_RPM_LDOA, 1, &pm8941_nldo, "vdd_l1_l3" },
241 { "l2", QCOM_SMD_RPM_LDOA, 2, &pm8941_nldo, "vdd_l2_lvs1_2_3" },
242 { "l3", QCOM_SMD_RPM_LDOA, 3, &pm8941_nldo, "vdd_l1_l3" },
243 { "l4", QCOM_SMD_RPM_LDOA, 4, &pm8941_nldo, "vdd_l4_l11" },
244 { "l5", QCOM_SMD_RPM_LDOA, 5, &pm8941_lnldo, "vdd_l5_l7" },
245 { "l6", QCOM_SMD_RPM_LDOA, 6, &pm8941_pldo, "vdd_l6_l12_l14_l15" },
246 { "l7", QCOM_SMD_RPM_LDOA, 7, &pm8941_lnldo, "vdd_l5_l7" },
247 { "l8", QCOM_SMD_RPM_LDOA, 8, &pm8941_pldo, "vdd_l8_l16_l18_l19" },
248 { "l9", QCOM_SMD_RPM_LDOA, 9, &pm8941_pldo, "vdd_l9_l10_l17_l22" },
249 { "l10", QCOM_SMD_RPM_LDOA, 10, &pm8941_pldo, "vdd_l9_l10_l17_l22" },
250 { "l11", QCOM_SMD_RPM_LDOA, 11, &pm8941_nldo, "vdd_l4_l11" },
251 { "l12", QCOM_SMD_RPM_LDOA, 12, &pm8941_pldo, "vdd_l6_l12_l14_l15" },
252 { "l13", QCOM_SMD_RPM_LDOA, 13, &pm8941_pldo, "vdd_l13_l20_l23_l24" },
253 { "l14", QCOM_SMD_RPM_LDOA, 14, &pm8941_pldo, "vdd_l6_l12_l14_l15" },
254 { "l15", QCOM_SMD_RPM_LDOA, 15, &pm8941_pldo, "vdd_l6_l12_l14_l15" },
255 { "l16", QCOM_SMD_RPM_LDOA, 16, &pm8941_pldo, "vdd_l8_l16_l18_l19" },
256 { "l17", QCOM_SMD_RPM_LDOA, 17, &pm8941_pldo, "vdd_l9_l10_l17_l22" },
257 { "l18", QCOM_SMD_RPM_LDOA, 18, &pm8941_pldo, "vdd_l8_l16_l18_l19" },
258 { "l19", QCOM_SMD_RPM_LDOA, 19, &pm8941_pldo, "vdd_l8_l16_l18_l19" },
259 { "l20", QCOM_SMD_RPM_LDOA, 20, &pm8941_pldo, "vdd_l13_l20_l23_l24" },
260 { "l21", QCOM_SMD_RPM_LDOA, 21, &pm8941_pldo, "vdd_l21" },
261 { "l22", QCOM_SMD_RPM_LDOA, 22, &pm8941_pldo, "vdd_l9_l10_l17_l22" },
262 { "l23", QCOM_SMD_RPM_LDOA, 23, &pm8941_pldo, "vdd_l13_l20_l23_l24" },
263 { "l24", QCOM_SMD_RPM_LDOA, 24, &pm8941_pldo, "vdd_l13_l20_l23_l24" },
264
265 { "lvs1", QCOM_SMD_RPM_VSA, 1, &pm8941_switch, "vdd_l2_lvs1_2_3" },
266 { "lvs2", QCOM_SMD_RPM_VSA, 2, &pm8941_switch, "vdd_l2_lvs1_2_3" },
267 { "lvs3", QCOM_SMD_RPM_VSA, 3, &pm8941_switch, "vdd_l2_lvs1_2_3" },
268
269 { "5vs1", QCOM_SMD_RPM_VSA, 4, &pm8941_switch, "vin_5vs" },
270 { "5vs2", QCOM_SMD_RPM_VSA, 5, &pm8941_switch, "vin_5vs" },
271
272 {}
273};
274
275static const struct of_device_id rpm_of_match[] = {
276 { .compatible = "qcom,rpm-pm8841-regulators", .data = &rpm_pm8841_regulators },
277 { .compatible = "qcom,rpm-pm8941-regulators", .data = &rpm_pm8941_regulators },
278 {}
279};
280MODULE_DEVICE_TABLE(of, rpm_of_match);
281
282static int rpm_reg_probe(struct platform_device *pdev)
283{
284 const struct rpm_regulator_data *reg;
285 const struct of_device_id *match;
286 struct regulator_config config = { };
287 struct regulator_dev *rdev;
288 struct qcom_rpm_reg *vreg;
289 struct qcom_smd_rpm *rpm;
290
291 rpm = dev_get_drvdata(pdev->dev.parent);
292 if (!rpm) {
293 dev_err(&pdev->dev, "unable to retrieve handle to rpm\n");
294 return -ENODEV;
295 }
296
297 match = of_match_device(rpm_of_match, &pdev->dev);
298 for (reg = match->data; reg->name; reg++) {
299 vreg = devm_kzalloc(&pdev->dev, sizeof(*vreg), GFP_KERNEL);
300 if (!vreg)
301 return -ENOMEM;
302
303 vreg->dev = &pdev->dev;
304 vreg->type = reg->type;
305 vreg->id = reg->id;
306 vreg->rpm = rpm;
307
308 memcpy(&vreg->desc, reg->desc, sizeof(vreg->desc));
309
310 vreg->desc.id = -1;
311 vreg->desc.owner = THIS_MODULE;
312 vreg->desc.type = REGULATOR_VOLTAGE;
313 vreg->desc.name = reg->name;
314 vreg->desc.supply_name = reg->supply;
315 vreg->desc.of_match = reg->name;
316
317 config.dev = &pdev->dev;
318 config.driver_data = vreg;
319 rdev = devm_regulator_register(&pdev->dev, &vreg->desc, &config);
320 if (IS_ERR(rdev)) {
321 dev_err(&pdev->dev, "failed to register %s\n", reg->name);
322 return PTR_ERR(rdev);
323 }
324 }
325
326 return 0;
327}
328
329static struct platform_driver rpm_reg_driver = {
330 .probe = rpm_reg_probe,
331 .driver = {
332 .name = "qcom_rpm_smd_regulator",
333 .of_match_table = rpm_of_match,
334 },
335};
336
337static int __init rpm_reg_init(void)
338{
339 return platform_driver_register(&rpm_reg_driver);
340}
341subsys_initcall(rpm_reg_init);
342
343static void __exit rpm_reg_exit(void)
344{
345 platform_driver_unregister(&rpm_reg_driver);
346}
347module_exit(rpm_reg_exit);
348
349MODULE_DESCRIPTION("Qualcomm RPM regulator driver");
350MODULE_LICENSE("GPL v2");
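The RPM_KEY_* values above are ASCII tags packed into a u32 so the firmware can match them byte for byte; 0x6e657773 stored little-endian is the bytes 's' 'w' 'e' 'n'. A small stand-alone illustration (host-side C on a little-endian machine, not driver code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint32_t key = 0x6e657773;	/* RPM_KEY_SWEN */
	char tag[5] = { 0 };

	memcpy(tag, &key, sizeof(key));	/* little-endian bytes: 's' 'w' 'e' 'n' */
	printf("%s\n", tag);		/* prints "swen" */
	return 0;
}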
diff --git a/drivers/regulator/qcom_spmi-regulator.c b/drivers/regulator/qcom_spmi-regulator.c
index 850a30a95b5b..88a5dc88badc 100644
--- a/drivers/regulator/qcom_spmi-regulator.c
+++ b/drivers/regulator/qcom_spmi-regulator.c
@@ -26,6 +26,70 @@
26#include <linux/regmap.h> 26#include <linux/regmap.h>
27#include <linux/list.h> 27#include <linux/list.h>
28 28
29/* Pin control enable input pins. */
30#define SPMI_REGULATOR_PIN_CTRL_ENABLE_NONE 0x00
31#define SPMI_REGULATOR_PIN_CTRL_ENABLE_EN0 0x01
32#define SPMI_REGULATOR_PIN_CTRL_ENABLE_EN1 0x02
33#define SPMI_REGULATOR_PIN_CTRL_ENABLE_EN2 0x04
34#define SPMI_REGULATOR_PIN_CTRL_ENABLE_EN3 0x08
35#define SPMI_REGULATOR_PIN_CTRL_ENABLE_HW_DEFAULT 0x10
36
37/* Pin control high power mode input pins. */
38#define SPMI_REGULATOR_PIN_CTRL_HPM_NONE 0x00
39#define SPMI_REGULATOR_PIN_CTRL_HPM_EN0 0x01
40#define SPMI_REGULATOR_PIN_CTRL_HPM_EN1 0x02
41#define SPMI_REGULATOR_PIN_CTRL_HPM_EN2 0x04
42#define SPMI_REGULATOR_PIN_CTRL_HPM_EN3 0x08
43#define SPMI_REGULATOR_PIN_CTRL_HPM_SLEEP_B 0x10
44#define SPMI_REGULATOR_PIN_CTRL_HPM_HW_DEFAULT 0x20
45
46/*
47 * Used with enable parameters to specify that hardware default register values
48 * should be left unaltered.
49 */
50#define SPMI_REGULATOR_USE_HW_DEFAULT 2
51
52/* Soft start strength of a voltage switch type regulator */
53enum spmi_vs_soft_start_str {
54 SPMI_VS_SOFT_START_STR_0P05_UA = 0,
55 SPMI_VS_SOFT_START_STR_0P25_UA,
56 SPMI_VS_SOFT_START_STR_0P55_UA,
57 SPMI_VS_SOFT_START_STR_0P75_UA,
58 SPMI_VS_SOFT_START_STR_HW_DEFAULT,
59};
60
61/**
62 * struct spmi_regulator_init_data - spmi-regulator initialization data
63 * @pin_ctrl_enable: Bit mask specifying which hardware pins should be
64 * used to enable the regulator, if any.
65 * The value should be a bitwise OR of
66 * SPMI_REGULATOR_PIN_CTRL_ENABLE_* constants. If
67 * the bit specified by
68 * SPMI_REGULATOR_PIN_CTRL_ENABLE_HW_DEFAULT is
69 * set, then pin control enable hardware registers
70 * will not be modified.
71 * @pin_ctrl_hpm: Bit mask specifying which hardware pins should be
72 * used to force the regulator into high power
73 * mode, if any.
74 * The value should be a bitwise OR of
75 * SPMI_REGULATOR_PIN_CTRL_HPM_* constants. If
76 * the bit specified by
77 * SPMI_REGULATOR_PIN_CTRL_HPM_HW_DEFAULT is
78 * set, then pin control mode hardware registers
79 * will not be modified.
80 * @vs_soft_start_strength: This parameter sets the soft start strength for
81 * voltage switch type regulators. Its value
82 * should be one of SPMI_VS_SOFT_START_STR_*. If
83 * its value is SPMI_VS_SOFT_START_STR_HW_DEFAULT,
84 * then the soft start strength will be left at its
85 * default hardware value.
86 */
87struct spmi_regulator_init_data {
88 unsigned pin_ctrl_enable;
89 unsigned pin_ctrl_hpm;
90 enum spmi_vs_soft_start_str vs_soft_start_strength;
91};
92
29/* These types correspond to unique register layouts. */ 93/* These types correspond to unique register layouts. */
30enum spmi_regulator_logical_type { 94enum spmi_regulator_logical_type {
31 SPMI_REGULATOR_LOGICAL_TYPE_SMPS, 95 SPMI_REGULATOR_LOGICAL_TYPE_SMPS,
@@ -458,6 +522,14 @@ static int spmi_regulator_vs_enable(struct regulator_dev *rdev)
458 return spmi_regulator_common_enable(rdev); 522 return spmi_regulator_common_enable(rdev);
459} 523}
460 524
525static int spmi_regulator_vs_ocp(struct regulator_dev *rdev)
526{
527 struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
528 u8 reg = SPMI_VS_OCP_OVERRIDE;
529
530 return spmi_vreg_write(vreg, SPMI_VS_REG_OCP, &reg, 1);
531}
532
461static int spmi_regulator_common_disable(struct regulator_dev *rdev) 533static int spmi_regulator_common_disable(struct regulator_dev *rdev)
462{ 534{
463 struct spmi_regulator *vreg = rdev_get_drvdata(rdev); 535 struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
@@ -504,8 +576,7 @@ static int spmi_regulator_select_voltage(struct spmi_regulator *vreg,
504 * Force uV to be an allowed set point by applying a ceiling function to 576 * Force uV to be an allowed set point by applying a ceiling function to
505 * the uV value. 577 * the uV value.
506 */ 578 */
507 *voltage_sel = (uV - range->min_uV + range->step_uV - 1) 579 *voltage_sel = DIV_ROUND_UP(uV - range->min_uV, range->step_uV);
508 / range->step_uV;
509 uV = *voltage_sel * range->step_uV + range->min_uV; 580 uV = *voltage_sel * range->step_uV + range->min_uV;
510 581
511 if (uV > max_uV) { 582 if (uV > max_uV) {
@@ -792,6 +863,9 @@ static unsigned int spmi_regulator_common_get_mode(struct regulator_dev *rdev)
792 if (reg & SPMI_COMMON_MODE_HPM_MASK) 863 if (reg & SPMI_COMMON_MODE_HPM_MASK)
793 return REGULATOR_MODE_NORMAL; 864 return REGULATOR_MODE_NORMAL;
794 865
866 if (reg & SPMI_COMMON_MODE_AUTO_MASK)
867 return REGULATOR_MODE_FAST;
868
795 return REGULATOR_MODE_IDLE; 869 return REGULATOR_MODE_IDLE;
796} 870}
797 871
@@ -799,11 +873,13 @@ static int
799spmi_regulator_common_set_mode(struct regulator_dev *rdev, unsigned int mode) 873spmi_regulator_common_set_mode(struct regulator_dev *rdev, unsigned int mode)
800{ 874{
801 struct spmi_regulator *vreg = rdev_get_drvdata(rdev); 875 struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
802 u8 mask = SPMI_COMMON_MODE_HPM_MASK; 876 u8 mask = SPMI_COMMON_MODE_HPM_MASK | SPMI_COMMON_MODE_AUTO_MASK;
803 u8 val = 0; 877 u8 val = 0;
804 878
805 if (mode == REGULATOR_MODE_NORMAL) 879 if (mode == REGULATOR_MODE_NORMAL)
806 val = mask; 880 val = SPMI_COMMON_MODE_HPM_MASK;
881 else if (mode == REGULATOR_MODE_FAST)
882 val = SPMI_COMMON_MODE_AUTO_MASK;
807 883
808 return spmi_vreg_update_bits(vreg, SPMI_COMMON_REG_MODE, val, mask); 884 return spmi_vreg_update_bits(vreg, SPMI_COMMON_REG_MODE, val, mask);
809} 885}
@@ -973,6 +1049,7 @@ static struct regulator_ops spmi_vs_ops = {
973 .is_enabled = spmi_regulator_common_is_enabled, 1049 .is_enabled = spmi_regulator_common_is_enabled,
974 .set_pull_down = spmi_regulator_common_set_pull_down, 1050 .set_pull_down = spmi_regulator_common_set_pull_down,
975 .set_soft_start = spmi_regulator_common_set_soft_start, 1051 .set_soft_start = spmi_regulator_common_set_soft_start,
1052 .set_over_current_protection = spmi_regulator_vs_ocp,
976}; 1053};
977 1054
978static struct regulator_ops spmi_boost_ops = { 1055static struct regulator_ops spmi_boost_ops = {
@@ -1203,10 +1280,111 @@ static int spmi_regulator_ftsmps_init_slew_rate(struct spmi_regulator *vreg)
1203 return ret; 1280 return ret;
1204} 1281}
1205 1282
1283static int spmi_regulator_init_registers(struct spmi_regulator *vreg,
1284 const struct spmi_regulator_init_data *data)
1285{
1286 int ret;
1287 enum spmi_regulator_logical_type type;
1288 u8 ctrl_reg[8], reg, mask;
1289
1290 type = vreg->logical_type;
1291
1292 ret = spmi_vreg_read(vreg, SPMI_COMMON_REG_VOLTAGE_RANGE, ctrl_reg, 8);
1293 if (ret)
1294 return ret;
1295
1296 /* Set up enable pin control. */
1297 if ((type == SPMI_REGULATOR_LOGICAL_TYPE_SMPS
1298 || type == SPMI_REGULATOR_LOGICAL_TYPE_LDO
1299 || type == SPMI_REGULATOR_LOGICAL_TYPE_VS)
1300 && !(data->pin_ctrl_enable
1301 & SPMI_REGULATOR_PIN_CTRL_ENABLE_HW_DEFAULT)) {
1302 ctrl_reg[SPMI_COMMON_IDX_ENABLE] &=
1303 ~SPMI_COMMON_ENABLE_FOLLOW_ALL_MASK;
1304 ctrl_reg[SPMI_COMMON_IDX_ENABLE] |=
1305 data->pin_ctrl_enable & SPMI_COMMON_ENABLE_FOLLOW_ALL_MASK;
1306 }
1307
1308 /* Set up mode pin control. */
1309 if ((type == SPMI_REGULATOR_LOGICAL_TYPE_SMPS
1310 || type == SPMI_REGULATOR_LOGICAL_TYPE_LDO)
1311 && !(data->pin_ctrl_hpm
1312 & SPMI_REGULATOR_PIN_CTRL_HPM_HW_DEFAULT)) {
1313 ctrl_reg[SPMI_COMMON_IDX_MODE] &=
1314 ~SPMI_COMMON_MODE_FOLLOW_ALL_MASK;
1315 ctrl_reg[SPMI_COMMON_IDX_MODE] |=
1316 data->pin_ctrl_hpm & SPMI_COMMON_MODE_FOLLOW_ALL_MASK;
1317 }
1318
1319 if (type == SPMI_REGULATOR_LOGICAL_TYPE_VS
1320 && !(data->pin_ctrl_hpm & SPMI_REGULATOR_PIN_CTRL_HPM_HW_DEFAULT)) {
1321 ctrl_reg[SPMI_COMMON_IDX_MODE] &=
1322 ~SPMI_COMMON_MODE_FOLLOW_AWAKE_MASK;
1323 ctrl_reg[SPMI_COMMON_IDX_MODE] |=
1324 data->pin_ctrl_hpm & SPMI_COMMON_MODE_FOLLOW_AWAKE_MASK;
1325 }
1326
1327 if ((type == SPMI_REGULATOR_LOGICAL_TYPE_ULT_LO_SMPS
1328 || type == SPMI_REGULATOR_LOGICAL_TYPE_ULT_HO_SMPS
1329 || type == SPMI_REGULATOR_LOGICAL_TYPE_ULT_LDO)
1330 && !(data->pin_ctrl_hpm
1331 & SPMI_REGULATOR_PIN_CTRL_HPM_HW_DEFAULT)) {
1332 ctrl_reg[SPMI_COMMON_IDX_MODE] &=
1333 ~SPMI_COMMON_MODE_FOLLOW_AWAKE_MASK;
1334 ctrl_reg[SPMI_COMMON_IDX_MODE] |=
1335 data->pin_ctrl_hpm & SPMI_COMMON_MODE_FOLLOW_AWAKE_MASK;
1336 }
1337
1338 /* Write back any control register values that were modified. */
1339 ret = spmi_vreg_write(vreg, SPMI_COMMON_REG_VOLTAGE_RANGE, ctrl_reg, 8);
1340 if (ret)
1341 return ret;
1342
1343 /* Set soft start strength and over current protection for VS. */
1344 if (type == SPMI_REGULATOR_LOGICAL_TYPE_VS) {
1345 if (data->vs_soft_start_strength
1346 != SPMI_VS_SOFT_START_STR_HW_DEFAULT) {
1347 reg = data->vs_soft_start_strength
1348 & SPMI_VS_SOFT_START_SEL_MASK;
1349 mask = SPMI_VS_SOFT_START_SEL_MASK;
1350 return spmi_vreg_update_bits(vreg,
1351 SPMI_VS_REG_SOFT_START,
1352 reg, mask);
1353 }
1354 }
1355
1356 return 0;
1357}
1358
1359static void spmi_regulator_get_dt_config(struct spmi_regulator *vreg,
1360 struct device_node *node, struct spmi_regulator_init_data *data)
1361{
1362 /*
1363 * Initialize configuration parameters to use hardware default in case
1364 * no value is specified via device tree.
1365 */
1366 data->pin_ctrl_enable = SPMI_REGULATOR_PIN_CTRL_ENABLE_HW_DEFAULT;
1367 data->pin_ctrl_hpm = SPMI_REGULATOR_PIN_CTRL_HPM_HW_DEFAULT;
1368 data->vs_soft_start_strength = SPMI_VS_SOFT_START_STR_HW_DEFAULT;
1369
1370 /* These bindings are optional, so it is okay if they aren't found. */
1371 of_property_read_u32(node, "qcom,ocp-max-retries",
1372 &vreg->ocp_max_retries);
1373 of_property_read_u32(node, "qcom,ocp-retry-delay",
1374 &vreg->ocp_retry_delay_ms);
1375 of_property_read_u32(node, "qcom,pin-ctrl-enable",
1376 &data->pin_ctrl_enable);
1377 of_property_read_u32(node, "qcom,pin-ctrl-hpm", &data->pin_ctrl_hpm);
1378 of_property_read_u32(node, "qcom,vs-soft-start-strength",
1379 &data->vs_soft_start_strength);
1380}
1381
1206static unsigned int spmi_regulator_of_map_mode(unsigned int mode) 1382static unsigned int spmi_regulator_of_map_mode(unsigned int mode)
1207{ 1383{
1208 if (mode) 1384 if (mode == 1)
1209 return REGULATOR_MODE_NORMAL; 1385 return REGULATOR_MODE_NORMAL;
1386 if (mode == 2)
1387 return REGULATOR_MODE_FAST;
1210 1388
1211 return REGULATOR_MODE_IDLE; 1389 return REGULATOR_MODE_IDLE;
1212} 1390}
@@ -1215,12 +1393,23 @@ static int spmi_regulator_of_parse(struct device_node *node,
1215 const struct regulator_desc *desc, 1393 const struct regulator_desc *desc,
1216 struct regulator_config *config) 1394 struct regulator_config *config)
1217{ 1395{
1396 struct spmi_regulator_init_data data = { };
1218 struct spmi_regulator *vreg = config->driver_data; 1397 struct spmi_regulator *vreg = config->driver_data;
1219 struct device *dev = config->dev; 1398 struct device *dev = config->dev;
1220 int ret; 1399 int ret;
1221 1400
1222 vreg->ocp_max_retries = SPMI_VS_OCP_DEFAULT_MAX_RETRIES; 1401 spmi_regulator_get_dt_config(vreg, node, &data);
1223 vreg->ocp_retry_delay_ms = SPMI_VS_OCP_DEFAULT_RETRY_DELAY_MS; 1402
1403 if (!vreg->ocp_max_retries)
1404 vreg->ocp_max_retries = SPMI_VS_OCP_DEFAULT_MAX_RETRIES;
1405 if (!vreg->ocp_retry_delay_ms)
1406 vreg->ocp_retry_delay_ms = SPMI_VS_OCP_DEFAULT_RETRY_DELAY_MS;
1407
1408 ret = spmi_regulator_init_registers(vreg, &data);
1409 if (ret) {
1410 dev_err(dev, "common initialization failed, ret=%d\n", ret);
1411 return ret;
1412 }
1224 1413
1225 if (vreg->logical_type == SPMI_REGULATOR_LOGICAL_TYPE_FTSMPS) { 1414 if (vreg->logical_type == SPMI_REGULATOR_LOGICAL_TYPE_FTSMPS) {
1226 ret = spmi_regulator_ftsmps_init_slew_rate(vreg); 1415 ret = spmi_regulator_ftsmps_init_slew_rate(vreg);
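The DIV_ROUND_UP() conversion above is a pure readability cleanup; both expressions compute the same ceiling. With illustrative range values of a 375000 uV minimum and a 12500 uV step, a request for 380000 uV rounds up to selector 1, i.e. the 387500 uV set point:

/* Illustrative check of the ceiling computed in spmi_regulator_select_voltage() */
static unsigned int spmi_sel_example(int uV)
{
	int min_uV = 375000, step_uV = 12500;	/* made-up range values */

	/* Equivalent to DIV_ROUND_UP(uV - min_uV, step_uV); 380000 -> 1 */
	return (uV - min_uV + step_uV - 1) / step_uV;
}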
diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c
index 3fd44353cc80..d86a3dcd61e2 100644
--- a/drivers/regulator/rk808-regulator.c
+++ b/drivers/regulator/rk808-regulator.c
@@ -16,12 +16,16 @@
16 * more details. 16 * more details.
17 */ 17 */
18 18
19#include <linux/module.h> 19#include <linux/delay.h>
20#include <linux/gpio.h>
20#include <linux/i2c.h> 21#include <linux/i2c.h>
21#include <linux/mfd/rk808.h> 22#include <linux/module.h>
22#include <linux/of_device.h> 23#include <linux/of_device.h>
24#include <linux/of_gpio.h>
25#include <linux/mfd/rk808.h>
23#include <linux/regulator/driver.h> 26#include <linux/regulator/driver.h>
24#include <linux/regulator/of_regulator.h> 27#include <linux/regulator/of_regulator.h>
28#include <linux/gpio/consumer.h>
25 29
26/* Field Definitions */ 30/* Field Definitions */
27#define RK808_BUCK_VSEL_MASK 0x3f 31#define RK808_BUCK_VSEL_MASK 0x3f
@@ -36,12 +40,25 @@
36#define RK808_RAMP_RATE_6MV_PER_US (2 << RK808_RAMP_RATE_OFFSET) 40#define RK808_RAMP_RATE_6MV_PER_US (2 << RK808_RAMP_RATE_OFFSET)
37#define RK808_RAMP_RATE_10MV_PER_US (3 << RK808_RAMP_RATE_OFFSET) 41#define RK808_RAMP_RATE_10MV_PER_US (3 << RK808_RAMP_RATE_OFFSET)
38 42
43#define RK808_DVS2_POL BIT(2)
44#define RK808_DVS1_POL BIT(1)
45
39/* Offset from XXX_ON_VSEL to XXX_SLP_VSEL */ 46/* Offset from XXX_ON_VSEL to XXX_SLP_VSEL */
40#define RK808_SLP_REG_OFFSET 1 47#define RK808_SLP_REG_OFFSET 1
41 48
49/* Offset from XXX_ON_VSEL to XXX_DVS_VSEL */
50#define RK808_DVS_REG_OFFSET 2
51
42/* Offset from XXX_EN_REG to SLEEP_SET_OFF_XXX */ 52/* Offset from XXX_EN_REG to SLEEP_SET_OFF_XXX */
43#define RK808_SLP_SET_OFF_REG_OFFSET 2 53#define RK808_SLP_SET_OFF_REG_OFFSET 2
44 54
55/* Max selector steps per write when raising the Buck1/2 voltage; 8 steps * 12.5 mV = 100 mV */
56#define MAX_STEPS_ONE_TIME 8
57
58struct rk808_regulator_data {
59 struct gpio_desc *dvs_gpio[2];
60};
61
45static const int rk808_buck_config_regs[] = { 62static const int rk808_buck_config_regs[] = {
46 RK808_BUCK1_CONFIG_REG, 63 RK808_BUCK1_CONFIG_REG,
47 RK808_BUCK2_CONFIG_REG, 64 RK808_BUCK2_CONFIG_REG,
@@ -70,6 +87,131 @@ static const struct regulator_linear_range rk808_ldo6_voltage_ranges[] = {
70 REGULATOR_LINEAR_RANGE(800000, 0, 17, 100000), 87 REGULATOR_LINEAR_RANGE(800000, 0, 17, 100000),
71}; 88};
72 89
90static int rk808_buck1_2_get_voltage_sel_regmap(struct regulator_dev *rdev)
91{
92 struct rk808_regulator_data *pdata = rdev_get_drvdata(rdev);
93 int id = rdev->desc->id - RK808_ID_DCDC1;
94 struct gpio_desc *gpio = pdata->dvs_gpio[id];
95 unsigned int val;
96 int ret;
97
98 if (!gpio || gpiod_get_value(gpio) == 0)
99 return regulator_get_voltage_sel_regmap(rdev);
100
101 ret = regmap_read(rdev->regmap,
102 rdev->desc->vsel_reg + RK808_DVS_REG_OFFSET,
103 &val);
104 if (ret != 0)
105 return ret;
106
107 val &= rdev->desc->vsel_mask;
108 val >>= ffs(rdev->desc->vsel_mask) - 1;
109
110 return val;
111}
112
113static int rk808_buck1_2_i2c_set_voltage_sel(struct regulator_dev *rdev,
114 unsigned sel)
115{
116 int ret, delta_sel;
117 unsigned int old_sel, tmp, val, mask = rdev->desc->vsel_mask;
118
119 ret = regmap_read(rdev->regmap, rdev->desc->vsel_reg, &val);
120 if (ret != 0)
121 return ret;
122
123 tmp = val & ~mask;
124 old_sel = val & mask;
125 old_sel >>= ffs(mask) - 1;
126 delta_sel = sel - old_sel;
127
128 /*
129	 * If we modify the register directly to change the voltage, we risk
130	 * overshoot. Splitting the change into multiple steps effectively
131	 * avoids this problem; one step is 100 mV here.
132 */
133 while (delta_sel > MAX_STEPS_ONE_TIME) {
134 old_sel += MAX_STEPS_ONE_TIME;
135 val = old_sel << (ffs(mask) - 1);
136 val |= tmp;
137
138 /*
139 * i2c is 400kHz (2.5us per bit) and we must transmit _at least_
140 * 3 bytes (24 bits) plus start and stop so 26 bits. So we've
141 * got more than 65 us between each voltage change and thus
142 * won't ramp faster than ~1500 uV / us.
143 */
144 ret = regmap_write(rdev->regmap, rdev->desc->vsel_reg, val);
145 delta_sel = sel - old_sel;
146 }
147
148 sel <<= ffs(mask) - 1;
149 val = tmp | sel;
150 ret = regmap_write(rdev->regmap, rdev->desc->vsel_reg, val);
151
152 /*
153 * When we change the voltage register directly, the ramp rate is about
154 * 100000uv/us, wait 1us to make sure the target voltage to be stable,
155 * so we needn't wait extra time after that.
156 */
157 udelay(1);
158
159 return ret;
160}
161
162static int rk808_buck1_2_set_voltage_sel(struct regulator_dev *rdev,
163 unsigned sel)
164{
165 struct rk808_regulator_data *pdata = rdev_get_drvdata(rdev);
166 int id = rdev->desc->id - RK808_ID_DCDC1;
167 struct gpio_desc *gpio = pdata->dvs_gpio[id];
168 unsigned int reg = rdev->desc->vsel_reg;
169 unsigned old_sel;
170 int ret, gpio_level;
171
172 if (!gpio)
173 return rk808_buck1_2_i2c_set_voltage_sel(rdev, sel);
174
175 gpio_level = gpiod_get_value(gpio);
176 if (gpio_level == 0) {
177 reg += RK808_DVS_REG_OFFSET;
178 ret = regmap_read(rdev->regmap, rdev->desc->vsel_reg, &old_sel);
179 } else {
180 ret = regmap_read(rdev->regmap,
181 reg + RK808_DVS_REG_OFFSET,
182 &old_sel);
183 }
184
185 if (ret != 0)
186 return ret;
187
188 sel <<= ffs(rdev->desc->vsel_mask) - 1;
189 sel |= old_sel & ~rdev->desc->vsel_mask;
190
191 ret = regmap_write(rdev->regmap, reg, sel);
192 if (ret)
193 return ret;
194
195 gpiod_set_value(gpio, !gpio_level);
196
197 return ret;
198}
199
200static int rk808_buck1_2_set_voltage_time_sel(struct regulator_dev *rdev,
201 unsigned int old_selector,
202 unsigned int new_selector)
203{
204 struct rk808_regulator_data *pdata = rdev_get_drvdata(rdev);
205 int id = rdev->desc->id - RK808_ID_DCDC1;
206 struct gpio_desc *gpio = pdata->dvs_gpio[id];
207
208	/* If there is no dvs1/2 pin, we don't need to wait extra time here. */
209 if (!gpio)
210 return 0;
211
212 return regulator_set_voltage_time_sel(rdev, old_selector, new_selector);
213}
214
73static int rk808_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay) 215static int rk808_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
74{ 216{
75 unsigned int ramp_value = RK808_RAMP_RATE_10MV_PER_US; 217 unsigned int ramp_value = RK808_RAMP_RATE_10MV_PER_US;
@@ -137,8 +279,9 @@ static int rk808_set_suspend_disable(struct regulator_dev *rdev)
137static struct regulator_ops rk808_buck1_2_ops = { 279static struct regulator_ops rk808_buck1_2_ops = {
138 .list_voltage = regulator_list_voltage_linear_range, 280 .list_voltage = regulator_list_voltage_linear_range,
139 .map_voltage = regulator_map_voltage_linear_range, 281 .map_voltage = regulator_map_voltage_linear_range,
140 .get_voltage_sel = regulator_get_voltage_sel_regmap, 282 .get_voltage_sel = rk808_buck1_2_get_voltage_sel_regmap,
141 .set_voltage_sel = regulator_set_voltage_sel_regmap, 283 .set_voltage_sel = rk808_buck1_2_set_voltage_sel,
284 .set_voltage_time_sel = rk808_buck1_2_set_voltage_time_sel,
142 .enable = regulator_enable_regmap, 285 .enable = regulator_enable_regmap,
143 .disable = regulator_disable_regmap, 286 .disable = regulator_disable_regmap,
144 .is_enabled = regulator_is_enabled_regmap, 287 .is_enabled = regulator_is_enabled_regmap,
@@ -380,25 +523,69 @@ static struct of_regulator_match rk808_reg_matches[] = {
380 [RK808_ID_SWITCH2] = { .name = "SWITCH_REG2" }, 523 [RK808_ID_SWITCH2] = { .name = "SWITCH_REG2" },
381}; 524};
382 525
526static int rk808_regulator_dt_parse_pdata(struct device *dev,
527 struct device *client_dev,
528 struct regmap *map,
529 struct rk808_regulator_data *pdata)
530{
531 struct device_node *np;
532 int tmp, ret, i;
533
534 np = of_get_child_by_name(client_dev->of_node, "regulators");
535 if (!np)
536 return -ENXIO;
537
538 ret = of_regulator_match(dev, np, rk808_reg_matches,
539 RK808_NUM_REGULATORS);
540 if (ret < 0)
541 goto dt_parse_end;
542
543 for (i = 0; i < ARRAY_SIZE(pdata->dvs_gpio); i++) {
544 pdata->dvs_gpio[i] =
545 devm_gpiod_get_index_optional(client_dev, "dvs", i,
546 GPIOD_OUT_LOW);
547 if (IS_ERR(pdata->dvs_gpio[i])) {
548 ret = PTR_ERR(pdata->dvs_gpio[i]);
549 dev_err(dev, "failed to get dvs%d gpio (%d)\n", i, ret);
550 goto dt_parse_end;
551 }
552
553 if (!pdata->dvs_gpio[i]) {
554 dev_warn(dev, "there is no dvs%d gpio\n", i);
555 continue;
556 }
557
558 tmp = i ? RK808_DVS2_POL : RK808_DVS1_POL;
559 ret = regmap_update_bits(map, RK808_IO_POL_REG, tmp,
560 gpiod_is_active_low(pdata->dvs_gpio[i]) ?
561 0 : tmp);
562 }
563
564dt_parse_end:
565 of_node_put(np);
566 return ret;
567}
568
383static int rk808_regulator_probe(struct platform_device *pdev) 569static int rk808_regulator_probe(struct platform_device *pdev)
384{ 570{
385 struct rk808 *rk808 = dev_get_drvdata(pdev->dev.parent); 571 struct rk808 *rk808 = dev_get_drvdata(pdev->dev.parent);
386 struct i2c_client *client = rk808->i2c; 572 struct i2c_client *client = rk808->i2c;
387 struct device_node *reg_np;
388 struct regulator_config config = {}; 573 struct regulator_config config = {};
389 struct regulator_dev *rk808_rdev; 574 struct regulator_dev *rk808_rdev;
575 struct rk808_regulator_data *pdata;
390 int ret, i; 576 int ret, i;
391 577
392 reg_np = of_get_child_by_name(client->dev.of_node, "regulators"); 578 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
393 if (!reg_np) 579 if (!pdata)
394 return -ENXIO; 580 return -ENOMEM;
395 581
396 ret = of_regulator_match(&pdev->dev, reg_np, rk808_reg_matches, 582 ret = rk808_regulator_dt_parse_pdata(&pdev->dev, &client->dev,
397 RK808_NUM_REGULATORS); 583 rk808->regmap, pdata);
398 of_node_put(reg_np);
399 if (ret < 0) 584 if (ret < 0)
400 return ret; 585 return ret;
401 586
587 platform_set_drvdata(pdev, pdata);
588
402 /* Instantiate the regulators */ 589 /* Instantiate the regulators */
403 for (i = 0; i < RK808_NUM_REGULATORS; i++) { 590 for (i = 0; i < RK808_NUM_REGULATORS; i++) {
404 if (!rk808_reg_matches[i].init_data || 591 if (!rk808_reg_matches[i].init_data ||
@@ -406,7 +593,7 @@ static int rk808_regulator_probe(struct platform_device *pdev)
406 continue; 593 continue;
407 594
408 config.dev = &client->dev; 595 config.dev = &client->dev;
409 config.driver_data = rk808; 596 config.driver_data = pdata;
410 config.regmap = rk808->regmap; 597 config.regmap = rk808->regmap;
411 config.of_node = rk808_reg_matches[i].of_node; 598 config.of_node = rk808_reg_matches[i].of_node;
412 config.init_data = rk808_reg_matches[i].init_data; 599 config.init_data = rk808_reg_matches[i].init_data;
@@ -427,6 +614,7 @@ static struct platform_driver rk808_regulator_driver = {
427 .probe = rk808_regulator_probe, 614 .probe = rk808_regulator_probe,
428 .driver = { 615 .driver = {
429 .name = "rk808-regulator", 616 .name = "rk808-regulator",
617 .owner = THIS_MODULE,
430 }, 618 },
431}; 619};
432 620
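
The rk808 hunk above moves the DT parsing out of probe into a helper that also picks up the optional DVS GPIOs with devm_gpiod_get_index_optional() and mirrors each line's active-low setting into the PMIC's polarity register via regmap. A minimal sketch of that optional-GPIO pattern, independent of rk808 and with made-up MYCHIP_* register names, might look like this:

#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/regmap.h>

#define MYCHIP_IO_POL_REG	0x10	/* hypothetical polarity register */
#define MYCHIP_DVS1_POL		BIT(0)	/* hypothetical polarity bits */
#define MYCHIP_DVS2_POL		BIT(1)

/*
 * Fetch up to two optional "dvs" GPIOs and mirror their active-low
 * setting into the chip's polarity register.  A missing GPIO is not an
 * error: devm_gpiod_get_index_optional() simply returns NULL for it.
 */
static int mychip_setup_dvs(struct device *dev, struct regmap *map,
			    struct gpio_desc *dvs_gpio[2])
{
	unsigned int pol_bit;
	int i, ret;

	for (i = 0; i < 2; i++) {
		dvs_gpio[i] = devm_gpiod_get_index_optional(dev, "dvs", i,
							    GPIOD_OUT_LOW);
		if (IS_ERR(dvs_gpio[i]))
			return PTR_ERR(dvs_gpio[i]);
		if (!dvs_gpio[i])
			continue;	/* property absent, keep going */

		pol_bit = i ? MYCHIP_DVS2_POL : MYCHIP_DVS1_POL;
		ret = regmap_update_bits(map, MYCHIP_IO_POL_REG, pol_bit,
					 gpiod_is_active_low(dvs_gpio[i]) ?
					 0 : pol_bit);
		if (ret)
			return ret;
	}
	return 0;
}

Using the _optional variant keeps boards without DVS wiring working with the same driver, which is why the rk808 code only warns and continues when a GPIO is absent.
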
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index 326ffb553371..72fc3c32db49 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -34,6 +34,8 @@
34#include <linux/mfd/samsung/s2mps14.h> 34#include <linux/mfd/samsung/s2mps14.h>
35#include <linux/mfd/samsung/s2mpu02.h> 35#include <linux/mfd/samsung/s2mpu02.h>
36 36
37/* The highest number of possible regulators for supported devices. */
38#define S2MPS_REGULATOR_MAX S2MPS13_REGULATOR_MAX
37struct s2mps11_info { 39struct s2mps11_info {
38 unsigned int rdev_num; 40 unsigned int rdev_num;
39 int ramp_delay2; 41 int ramp_delay2;
@@ -49,7 +51,7 @@ struct s2mps11_info {
49 * One bit for each S2MPS13/S2MPS14/S2MPU02 regulator whether 51 * One bit for each S2MPS13/S2MPS14/S2MPU02 regulator whether
50 * the suspend mode was enabled. 52 * the suspend mode was enabled.
51 */ 53 */
52 unsigned long long s2mps14_suspend_state:50; 54 DECLARE_BITMAP(suspend_state, S2MPS_REGULATOR_MAX);
53 55
54 /* Array of size rdev_num with GPIO-s for external sleep control */ 56 /* Array of size rdev_num with GPIO-s for external sleep control */
55 int *ext_control_gpio; 57 int *ext_control_gpio;
@@ -500,7 +502,7 @@ static int s2mps14_regulator_enable(struct regulator_dev *rdev)
500 switch (s2mps11->dev_type) { 502 switch (s2mps11->dev_type) {
501 case S2MPS13X: 503 case S2MPS13X:
502 case S2MPS14X: 504 case S2MPS14X:
503 if (s2mps11->s2mps14_suspend_state & (1 << rdev_get_id(rdev))) 505 if (test_bit(rdev_get_id(rdev), s2mps11->suspend_state))
504 val = S2MPS14_ENABLE_SUSPEND; 506 val = S2MPS14_ENABLE_SUSPEND;
505 else if (gpio_is_valid(s2mps11->ext_control_gpio[rdev_get_id(rdev)])) 507 else if (gpio_is_valid(s2mps11->ext_control_gpio[rdev_get_id(rdev)]))
506 val = S2MPS14_ENABLE_EXT_CONTROL; 508 val = S2MPS14_ENABLE_EXT_CONTROL;
@@ -508,7 +510,7 @@ static int s2mps14_regulator_enable(struct regulator_dev *rdev)
508 val = rdev->desc->enable_mask; 510 val = rdev->desc->enable_mask;
509 break; 511 break;
510 case S2MPU02: 512 case S2MPU02:
511 if (s2mps11->s2mps14_suspend_state & (1 << rdev_get_id(rdev))) 513 if (test_bit(rdev_get_id(rdev), s2mps11->suspend_state))
512 val = S2MPU02_ENABLE_SUSPEND; 514 val = S2MPU02_ENABLE_SUSPEND;
513 else 515 else
514 val = rdev->desc->enable_mask; 516 val = rdev->desc->enable_mask;
@@ -562,7 +564,7 @@ static int s2mps14_regulator_set_suspend_disable(struct regulator_dev *rdev)
562 if (ret < 0) 564 if (ret < 0)
563 return ret; 565 return ret;
564 566
565 s2mps11->s2mps14_suspend_state |= (1 << rdev_get_id(rdev)); 567 set_bit(rdev_get_id(rdev), s2mps11->suspend_state);
566 /* 568 /*
567 * Don't enable suspend mode if regulator is already disabled because 569 * Don't enable suspend mode if regulator is already disabled because
568 * this would effectively for a short time turn on the regulator after 570 * this would effectively for a short time turn on the regulator after
@@ -960,18 +962,22 @@ static int s2mps11_pmic_probe(struct platform_device *pdev)
960 case S2MPS11X: 962 case S2MPS11X:
961 s2mps11->rdev_num = ARRAY_SIZE(s2mps11_regulators); 963 s2mps11->rdev_num = ARRAY_SIZE(s2mps11_regulators);
962 regulators = s2mps11_regulators; 964 regulators = s2mps11_regulators;
965 BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num);
963 break; 966 break;
964 case S2MPS13X: 967 case S2MPS13X:
965 s2mps11->rdev_num = ARRAY_SIZE(s2mps13_regulators); 968 s2mps11->rdev_num = ARRAY_SIZE(s2mps13_regulators);
966 regulators = s2mps13_regulators; 969 regulators = s2mps13_regulators;
970 BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num);
967 break; 971 break;
968 case S2MPS14X: 972 case S2MPS14X:
969 s2mps11->rdev_num = ARRAY_SIZE(s2mps14_regulators); 973 s2mps11->rdev_num = ARRAY_SIZE(s2mps14_regulators);
970 regulators = s2mps14_regulators; 974 regulators = s2mps14_regulators;
975 BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num);
971 break; 976 break;
972 case S2MPU02: 977 case S2MPU02:
973 s2mps11->rdev_num = ARRAY_SIZE(s2mpu02_regulators); 978 s2mps11->rdev_num = ARRAY_SIZE(s2mpu02_regulators);
974 regulators = s2mpu02_regulators; 979 regulators = s2mpu02_regulators;
980 BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num);
975 break; 981 break;
976 default: 982 default:
977 dev_err(&pdev->dev, "Invalid device type: %u\n", 983 dev_err(&pdev->dev, "Invalid device type: %u\n",
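
The s2mps11 change replaces a hand-rolled 50-bit-wide bitfield with DECLARE_BITMAP() plus the test_bit()/set_bit() helpers, guarded by a BUILD_BUG_ON() per device type so the bound can never silently become too small. A stripped-down sketch of that pattern (MY_REGULATOR_MAX and struct my_pmic_info are invented for illustration):

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/types.h>

#define MY_REGULATOR_MAX	64	/* hypothetical upper bound across chips */

struct my_pmic_info {
	unsigned int rdev_num;
	/* one bit per regulator: was suspend mode requested? */
	DECLARE_BITMAP(suspend_state, MY_REGULATOR_MAX);
};

static void my_pmic_mark_suspended(struct my_pmic_info *info, int id)
{
	set_bit(id, info->suspend_state);
}

static bool my_pmic_is_suspended(struct my_pmic_info *info, int id)
{
	return test_bit(id, info->suspend_state);
}

Unlike a fixed ":50" bitfield, the bitmap grows past the width of unsigned long long just by raising the bound, and set_bit()/test_bit() operate on individual bits without shifting a wide integer.
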
diff --git a/drivers/regulator/tps51632-regulator.c b/drivers/regulator/tps51632-regulator.c
index c213e37eb69e..572816e30095 100644
--- a/drivers/regulator/tps51632-regulator.c
+++ b/drivers/regulator/tps51632-regulator.c
@@ -362,7 +362,6 @@ MODULE_DEVICE_TABLE(i2c, tps51632_id);
362static struct i2c_driver tps51632_i2c_driver = { 362static struct i2c_driver tps51632_i2c_driver = {
363 .driver = { 363 .driver = {
364 .name = "tps51632", 364 .name = "tps51632",
365 .owner = THIS_MODULE,
366 .of_match_table = of_match_ptr(tps51632_of_match), 365 .of_match_table = of_match_ptr(tps51632_of_match),
367 }, 366 },
368 .probe = tps51632_probe, 367 .probe = tps51632_probe,
diff --git a/drivers/regulator/tps62360-regulator.c b/drivers/regulator/tps62360-regulator.c
index a1fd626c6c96..f6a6d36a6533 100644
--- a/drivers/regulator/tps62360-regulator.c
+++ b/drivers/regulator/tps62360-regulator.c
@@ -515,7 +515,6 @@ MODULE_DEVICE_TABLE(i2c, tps62360_id);
515static struct i2c_driver tps62360_i2c_driver = { 515static struct i2c_driver tps62360_i2c_driver = {
516 .driver = { 516 .driver = {
517 .name = "tps62360", 517 .name = "tps62360",
518 .owner = THIS_MODULE,
519 .of_match_table = of_match_ptr(tps62360_of_match), 518 .of_match_table = of_match_ptr(tps62360_of_match),
520 }, 519 },
521 .probe = tps62360_probe, 520 .probe = tps62360_probe,
diff --git a/drivers/regulator/tps65023-regulator.c b/drivers/regulator/tps65023-regulator.c
index b941e564b3f3..5cc19b44974a 100644
--- a/drivers/regulator/tps65023-regulator.c
+++ b/drivers/regulator/tps65023-regulator.c
@@ -410,7 +410,6 @@ MODULE_DEVICE_TABLE(i2c, tps_65023_id);
410static struct i2c_driver tps_65023_i2c_driver = { 410static struct i2c_driver tps_65023_i2c_driver = {
411 .driver = { 411 .driver = {
412 .name = "tps65023", 412 .name = "tps65023",
413 .owner = THIS_MODULE,
414 }, 413 },
415 .probe = tps_65023_probe, 414 .probe = tps_65023_probe,
416 .id_table = tps_65023_id, 415 .id_table = tps_65023_id,
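
The three tps* hunks above only delete ".owner = THIS_MODULE": i2c_add_driver() and module_i2c_driver() already pass THIS_MODULE into the core, which fills in the driver's owner field at registration, so the explicit assignment is redundant. A hypothetical skeleton registered the now-preferred way:

#include <linux/i2c.h>
#include <linux/module.h>

static int mychip_probe(struct i2c_client *client,
			const struct i2c_device_id *id)
{
	return 0;	/* real driver would set up regmap, regulators, ... */
}

static int mychip_remove(struct i2c_client *client)
{
	return 0;
}

static const struct i2c_device_id mychip_id[] = {
	{ "mychip", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, mychip_id);

static struct i2c_driver mychip_driver = {
	.driver = {
		.name = "mychip",
		/* no .owner: the i2c core fills it in at registration */
	},
	.probe = mychip_probe,
	.remove = mychip_remove,
	.id_table = mychip_id,
};
module_i2c_driver(mychip_driver);

MODULE_LICENSE("GPL");
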
diff --git a/drivers/rtc/rtc-armada38x.c b/drivers/rtc/rtc-armada38x.c
index 4b62d1a875e4..2b08cac62f07 100644
--- a/drivers/rtc/rtc-armada38x.c
+++ b/drivers/rtc/rtc-armada38x.c
@@ -88,7 +88,7 @@ static int armada38x_rtc_set_time(struct device *dev, struct rtc_time *tm)
88{ 88{
89 struct armada38x_rtc *rtc = dev_get_drvdata(dev); 89 struct armada38x_rtc *rtc = dev_get_drvdata(dev);
90 int ret = 0; 90 int ret = 0;
91 unsigned long time, flags; 91 unsigned long time;
92 92
93 ret = rtc_tm_to_time(tm, &time); 93 ret = rtc_tm_to_time(tm, &time);
94 94
diff --git a/drivers/rtc/rtc-mt6397.c b/drivers/rtc/rtc-mt6397.c
index c0090b698ff3..eab230be5a54 100644
--- a/drivers/rtc/rtc-mt6397.c
+++ b/drivers/rtc/rtc-mt6397.c
@@ -343,6 +343,8 @@ static int mtk_rtc_probe(struct platform_device *pdev)
343 goto out_dispose_irq; 343 goto out_dispose_irq;
344 } 344 }
345 345
346 device_init_wakeup(&pdev->dev, 1);
347
346 rtc->rtc_dev = rtc_device_register("mt6397-rtc", &pdev->dev, 348 rtc->rtc_dev = rtc_device_register("mt6397-rtc", &pdev->dev,
347 &mtk_rtc_ops, THIS_MODULE); 349 &mtk_rtc_ops, THIS_MODULE);
348 if (IS_ERR(rtc->rtc_dev)) { 350 if (IS_ERR(rtc->rtc_dev)) {
@@ -351,8 +353,6 @@ static int mtk_rtc_probe(struct platform_device *pdev)
351 goto out_free_irq; 353 goto out_free_irq;
352 } 354 }
353 355
354 device_init_wakeup(&pdev->dev, 1);
355
356 return 0; 356 return 0;
357 357
358out_free_irq: 358out_free_irq:
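
The mt6397 fix only moves device_init_wakeup() ahead of rtc_device_register(); the RTC core looks at the wakeup capability while the class device is being registered (for example when deciding whether to expose the wakealarm attribute), so it has to be declared first. A minimal probe sketch of that ordering, using the devm_ variant for brevity and hypothetical mychip names with the ops elided:

#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_wakeup.h>
#include <linux/rtc.h>

static const struct rtc_class_ops mychip_rtc_ops;	/* callbacks elided */

static int mychip_rtc_probe(struct platform_device *pdev)
{
	struct rtc_device *rtc;

	/* declare wakeup capability before the RTC class device exists */
	device_init_wakeup(&pdev->dev, true);

	rtc = devm_rtc_device_register(&pdev->dev, "mychip-rtc",
				       &mychip_rtc_ops, THIS_MODULE);
	return PTR_ERR_OR_ZERO(rtc);
}
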
diff --git a/drivers/s390/Makefile b/drivers/s390/Makefile
index 95bccfd3f169..e5225ad9c5b1 100644
--- a/drivers/s390/Makefile
+++ b/drivers/s390/Makefile
@@ -2,7 +2,7 @@
2# Makefile for the S/390 specific device drivers 2# Makefile for the S/390 specific device drivers
3# 3#
4 4
5obj-y += cio/ block/ char/ crypto/ net/ scsi/ kvm/ 5obj-y += cio/ block/ char/ crypto/ net/ scsi/ virtio/
6 6
7drivers-y += drivers/s390/built-in.o 7drivers-y += drivers/s390/built-in.o
8 8
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 1aec8ff0b587..f73d2f579a7e 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1863,6 +1863,33 @@ static void __dasd_device_check_expire(struct dasd_device *device)
1863} 1863}
1864 1864
1865/* 1865/*
1866 * return 1 when device is not eligible for IO
1867 */
1868static int __dasd_device_is_unusable(struct dasd_device *device,
1869 struct dasd_ccw_req *cqr)
1870{
1871 int mask = ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM);
1872
1873 if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
1874 /* dasd is being set offline. */
1875 return 1;
1876 }
1877 if (device->stopped) {
1878 if (device->stopped & mask) {
1879 /* stopped and CQR will not change that. */
1880 return 1;
1881 }
1882 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
1883 /* CQR is not able to change device to
1884 * operational. */
1885 return 1;
1886 }
1887 /* CQR required to get device operational. */
1888 }
1889 return 0;
1890}
1891
1892/*
1866 * Take a look at the first request on the ccw queue and check 1893 * Take a look at the first request on the ccw queue and check
1867 * if it needs to be started. 1894 * if it needs to be started.
1868 */ 1895 */
@@ -1876,13 +1903,8 @@ static void __dasd_device_start_head(struct dasd_device *device)
1876 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); 1903 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
1877 if (cqr->status != DASD_CQR_QUEUED) 1904 if (cqr->status != DASD_CQR_QUEUED)
1878 return; 1905 return;
1879 /* when device is stopped, return request to previous layer
1880 * exception: only the disconnect or unresumed bits are set and the
1881 * cqr is a path verification request
1882 */
1883 if (device->stopped &&
1884 !(!(device->stopped & ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM))
1885 && test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))) {
1906 /* if device is not usable return request to upper layer */
1907 if (__dasd_device_is_unusable(device, cqr)) {

1886 cqr->intrc = -EAGAIN; 1908 cqr->intrc = -EAGAIN;
1887 cqr->status = DASD_CQR_CLEARED; 1909 cqr->status = DASD_CQR_CLEARED;
1888 dasd_schedule_device_bh(device); 1910 dasd_schedule_device_bh(device);
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index a2597e683e79..ee3a6faae22a 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -699,7 +699,8 @@ struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
699 struct dasd_device, alias_list); 699 struct dasd_device, alias_list);
700 spin_unlock_irqrestore(&lcu->lock, flags); 700 spin_unlock_irqrestore(&lcu->lock, flags);
701 alias_priv = (struct dasd_eckd_private *) alias_device->private; 701 alias_priv = (struct dasd_eckd_private *) alias_device->private;
702 if ((alias_priv->count < private->count) && !alias_device->stopped) 702 if ((alias_priv->count < private->count) && !alias_device->stopped &&
703 !test_bit(DASD_FLAG_OFFLINE, &alias_device->flags))
703 return alias_device; 704 return alias_device;
704 else 705 else
705 return NULL; 706 return NULL;
diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
index aeed7969fd79..7bc6df3100ef 100644
--- a/drivers/s390/char/sclp_early.c
+++ b/drivers/s390/char/sclp_early.c
@@ -7,6 +7,7 @@
7#define KMSG_COMPONENT "sclp_early" 7#define KMSG_COMPONENT "sclp_early"
8#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 8#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
9 9
10#include <linux/errno.h>
10#include <asm/ctl_reg.h> 11#include <asm/ctl_reg.h>
11#include <asm/sclp.h> 12#include <asm/sclp.h>
12#include <asm/ipl.h> 13#include <asm/ipl.h>
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 08f1830cbfc4..01bf1f5cf2e9 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -54,6 +54,10 @@ MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " \
54 "Copyright IBM Corp. 2001, 2012"); 54 "Copyright IBM Corp. 2001, 2012");
55MODULE_LICENSE("GPL"); 55MODULE_LICENSE("GPL");
56 56
57static int zcrypt_hwrng_seed = 1;
58module_param_named(hwrng_seed, zcrypt_hwrng_seed, int, S_IRUSR|S_IRGRP);
59MODULE_PARM_DESC(hwrng_seed, "Turn on/off hwrng auto seed, default is 1 (on).");
60
57static DEFINE_SPINLOCK(zcrypt_device_lock); 61static DEFINE_SPINLOCK(zcrypt_device_lock);
58static LIST_HEAD(zcrypt_device_list); 62static LIST_HEAD(zcrypt_device_list);
59static int zcrypt_device_count = 0; 63static int zcrypt_device_count = 0;
@@ -1373,6 +1377,7 @@ static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data)
1373static struct hwrng zcrypt_rng_dev = { 1377static struct hwrng zcrypt_rng_dev = {
1374 .name = "zcrypt", 1378 .name = "zcrypt",
1375 .data_read = zcrypt_rng_data_read, 1379 .data_read = zcrypt_rng_data_read,
1380 .quality = 990,
1376}; 1381};
1377 1382
1378static int zcrypt_rng_device_add(void) 1383static int zcrypt_rng_device_add(void)
@@ -1387,6 +1392,8 @@ static int zcrypt_rng_device_add(void)
1387 goto out; 1392 goto out;
1388 } 1393 }
1389 zcrypt_rng_buffer_index = 0; 1394 zcrypt_rng_buffer_index = 0;
1395 if (!zcrypt_hwrng_seed)
1396 zcrypt_rng_dev.quality = 0;
1390 rc = hwrng_register(&zcrypt_rng_dev); 1397 rc = hwrng_register(&zcrypt_rng_dev);
1391 if (rc) 1398 if (rc)
1392 goto out_free; 1399 goto out_free;
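
The zcrypt change advertises a non-zero entropy quality for its hwrng and adds an hwrng_seed module parameter to opt out; a quality of 0 tells the hw_random core not to use the source for automatic seeding of the entropy pool. A condensed sketch of the same registration pattern with a made-up myrng device:

#include <linux/hw_random.h>
#include <linux/module.h>
#include <linux/stat.h>

static int myrng_seed = 1;
module_param_named(hwrng_seed, myrng_seed, int, S_IRUSR | S_IRGRP);
MODULE_PARM_DESC(hwrng_seed, "Feed entropy estimate to the core (default 1)");

static int myrng_data_read(struct hwrng *rng, u32 *data)
{
	*data = 0;		/* a real driver would read the hardware here */
	return sizeof(*data);	/* number of bytes placed in *data */
}

static struct hwrng myrng_dev = {
	.name = "myrng",
	.data_read = myrng_data_read,
	.quality = 990,		/* estimated entropy, per mille of input */
};

static int __init myrng_init(void)
{
	/* quality 0: still usable via /dev/hwrng, but not auto-seeded */
	if (!myrng_seed)
		myrng_dev.quality = 0;
	return hwrng_register(&myrng_dev);
}

static void __exit myrng_exit(void)
{
	hwrng_unregister(&myrng_dev);
}

module_init(myrng_init);
module_exit(myrng_exit);
MODULE_LICENSE("GPL");
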
diff --git a/drivers/s390/kvm/Makefile b/drivers/s390/virtio/Makefile
index 241891a57caf..241891a57caf 100644
--- a/drivers/s390/kvm/Makefile
+++ b/drivers/s390/virtio/Makefile
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/virtio/kvm_virtio.c
index 53fb975c404b..53fb975c404b 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/virtio/kvm_virtio.c
diff --git a/drivers/s390/kvm/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index f8d8fdb26b72..f8d8fdb26b72 100644
--- a/drivers/s390/kvm/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index 26270c351624..ce129e595b55 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -39,7 +39,7 @@
39 39
40#define DRV_NAME "fnic" 40#define DRV_NAME "fnic"
41#define DRV_DESCRIPTION "Cisco FCoE HBA Driver" 41#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
42#define DRV_VERSION "1.6.0.17" 42#define DRV_VERSION "1.6.0.17a"
43#define PFX DRV_NAME ": " 43#define PFX DRV_NAME ": "
44#define DFX DRV_NAME "%d: " 44#define DFX DRV_NAME "%d: "
45 45
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 155b286f1a9d..25436cd2860c 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -425,6 +425,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
425 unsigned long ptr; 425 unsigned long ptr;
426 struct fc_rport_priv *rdata; 426 struct fc_rport_priv *rdata;
427 spinlock_t *io_lock = NULL; 427 spinlock_t *io_lock = NULL;
428 int io_lock_acquired = 0;
428 429
429 if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED))) 430 if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED)))
430 return SCSI_MLQUEUE_HOST_BUSY; 431 return SCSI_MLQUEUE_HOST_BUSY;
@@ -518,6 +519,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
518 spin_lock_irqsave(io_lock, flags); 519 spin_lock_irqsave(io_lock, flags);
519 520
520 /* initialize rest of io_req */ 521 /* initialize rest of io_req */
522 io_lock_acquired = 1;
521 io_req->port_id = rport->port_id; 523 io_req->port_id = rport->port_id;
522 io_req->start_time = jiffies; 524 io_req->start_time = jiffies;
523 CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING; 525 CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
@@ -571,7 +573,7 @@ out:
571 (((u64)CMD_FLAGS(sc) >> 32) | CMD_STATE(sc))); 573 (((u64)CMD_FLAGS(sc) >> 32) | CMD_STATE(sc)));
572 574
573 /* if only we issued IO, will we have the io lock */ 575 /* if only we issued IO, will we have the io lock */
574 if (CMD_FLAGS(sc) & FNIC_IO_INITIALIZED) 576 if (io_lock_acquired)
575 spin_unlock_irqrestore(io_lock, flags); 577 spin_unlock_irqrestore(io_lock, flags);
576 578
577 atomic_dec(&fnic->in_flight); 579 atomic_dec(&fnic->in_flight);
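
The fnic fix stops inferring "the io_lock is held" from command flags and instead tracks it with an explicit io_lock_acquired variable set right after the lock is taken, so the shared exit path only unlocks when the lock really is held. The shape of that pattern, reduced to a sketch with hypothetical types:

#include <linux/spinlock.h>

struct my_req;		/* opaque request type, for illustration only */

static int queue_one(spinlock_t *io_lock, struct my_req *req)
{
	unsigned long flags = 0;
	int locked = 0;		/* did this path take io_lock? */
	int ret = 0;

	if (!req)
		goto out;	/* early exit: lock never taken */

	spin_lock_irqsave(io_lock, flags);
	locked = 1;
	/* ... fill in and issue the request under the lock ... */

out:
	/* unlock only on the paths that actually acquired the lock,
	 * instead of deducing that from unrelated state flags */
	if (locked)
		spin_unlock_irqrestore(io_lock, flags);
	return ret;
}
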
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 882744852aac..a9aa38903efe 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -599,9 +599,10 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
599{ 599{
600 struct ipr_trace_entry *trace_entry; 600 struct ipr_trace_entry *trace_entry;
601 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 601 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
602 unsigned int trace_index;
602 603
603 trace_entry = &ioa_cfg->trace[atomic_add_return
604 (1, &ioa_cfg->trace_index)%IPR_NUM_TRACE_ENTRIES];
604 trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
605 trace_entry = &ioa_cfg->trace[trace_index];
605 trace_entry->time = jiffies; 606 trace_entry->time = jiffies;
606 trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0]; 607 trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
607 trace_entry->type = type; 608 trace_entry->type = type;
@@ -1051,10 +1052,15 @@ static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
1051 1052
1052static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg) 1053static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
1053{ 1054{
1055 unsigned int hrrq;
1056
1054 if (ioa_cfg->hrrq_num == 1) 1057 if (ioa_cfg->hrrq_num == 1)
1055 return 0; 1058 hrrq = 0;
1056 else 1059 else {
1057 return (atomic_add_return(1, &ioa_cfg->hrrq_index) % (ioa_cfg->hrrq_num - 1)) + 1; 1060 hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
1061 hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
1062 }
1063 return hrrq;
1058} 1064}
1059 1065
1060/** 1066/**
@@ -6263,21 +6269,23 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6263 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 6269 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6264 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; 6270 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6265 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 6271 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6266 unsigned long hrrq_flags; 6272 unsigned long lock_flags;
6267 6273
6268 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len)); 6274 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
6269 6275
6270 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) { 6276 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
6271 scsi_dma_unmap(scsi_cmd); 6277 scsi_dma_unmap(scsi_cmd);
6272 6278
6273 spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags); 6279 spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
6274 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 6280 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6275 scsi_cmd->scsi_done(scsi_cmd); 6281 scsi_cmd->scsi_done(scsi_cmd);
6276 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags); 6282 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
6277 } else { 6283 } else {
6278 spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags); 6284 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6285 spin_lock(&ipr_cmd->hrrq->_lock);
6279 ipr_erp_start(ioa_cfg, ipr_cmd); 6286 ipr_erp_start(ioa_cfg, ipr_cmd);
6280 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags); 6287 spin_unlock(&ipr_cmd->hrrq->_lock);
6288 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6281 } 6289 }
6282} 6290}
6283 6291
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 73790a1d0969..6b97ee45c7b4 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -1486,6 +1486,7 @@ struct ipr_ioa_cfg {
1486 1486
1487#define IPR_NUM_TRACE_INDEX_BITS 8 1487#define IPR_NUM_TRACE_INDEX_BITS 8
1488#define IPR_NUM_TRACE_ENTRIES (1 << IPR_NUM_TRACE_INDEX_BITS) 1488#define IPR_NUM_TRACE_ENTRIES (1 << IPR_NUM_TRACE_INDEX_BITS)
1489#define IPR_TRACE_INDEX_MASK (IPR_NUM_TRACE_ENTRIES - 1)
1489#define IPR_TRACE_SIZE (sizeof(struct ipr_trace_entry) * IPR_NUM_TRACE_ENTRIES) 1490#define IPR_TRACE_SIZE (sizeof(struct ipr_trace_entry) * IPR_NUM_TRACE_ENTRIES)
1490 char trace_start[8]; 1491 char trace_start[8];
1491#define IPR_TRACE_START_LABEL "trace" 1492#define IPR_TRACE_START_LABEL "trace"
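
In the ipr hunks, atomic_add_return() feeds a signed counter into a modulo, so once the counter wraps negative the computed trace index can fall outside the table; masking with IPR_TRACE_INDEX_MASK (table size minus one, a power of two) stays in range no matter how far the counter runs. The same idea in plain, runnable C11:

#include <stdatomic.h>
#include <stdio.h>

#define NUM_TRACE_INDEX_BITS	8
#define NUM_TRACE_ENTRIES	(1u << NUM_TRACE_INDEX_BITS)
#define TRACE_INDEX_MASK	(NUM_TRACE_ENTRIES - 1)

static atomic_uint trace_counter;

/*
 * The counter increments forever; ANDing with (size - 1) wraps it into
 * the table.  Unlike "% NUM_TRACE_ENTRIES" applied to a signed value,
 * the mask never goes negative and avoids a division.
 */
static unsigned int next_trace_index(void)
{
	return atomic_fetch_add(&trace_counter, 1) & TRACE_INDEX_MASK;
}

int main(void)
{
	for (int i = 0; i < 5; i++)
		printf("slot %u\n", next_trace_index());
	return 0;
}

The hrrq index fix in the same file follows the same reasoning: take the raw counter first, then reduce it into range in a separate, well-defined step.
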
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 1b3a09473452..30f9ef0c0d4f 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -733,8 +733,6 @@ static bool fc_invoke_resp(struct fc_exch *ep, struct fc_seq *sp,
733 if (resp) { 733 if (resp) {
734 resp(sp, fp, arg); 734 resp(sp, fp, arg);
735 res = true; 735 res = true;
736 } else if (!IS_ERR(fp)) {
737 fc_frame_free(fp);
738 } 736 }
739 737
740 spin_lock_bh(&ep->ex_lock); 738 spin_lock_bh(&ep->ex_lock);
@@ -1596,7 +1594,8 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
1596 * If new exch resp handler is valid then call that 1594 * If new exch resp handler is valid then call that
1597 * first. 1595 * first.
1598 */ 1596 */
1599 fc_invoke_resp(ep, sp, fp); 1597 if (!fc_invoke_resp(ep, sp, fp))
1598 fc_frame_free(fp);
1600 1599
1601 fc_exch_release(ep); 1600 fc_exch_release(ep);
1602 return; 1601 return;
@@ -1695,7 +1694,8 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
1695 fc_exch_hold(ep); 1694 fc_exch_hold(ep);
1696 if (!rc) 1695 if (!rc)
1697 fc_exch_delete(ep); 1696 fc_exch_delete(ep);
1698 fc_invoke_resp(ep, sp, fp); 1697 if (!fc_invoke_resp(ep, sp, fp))
1698 fc_frame_free(fp);
1699 if (has_rec) 1699 if (has_rec)
1700 fc_exch_timer_set(ep, ep->r_a_tov); 1700 fc_exch_timer_set(ep, ep->r_a_tov);
1701 fc_exch_release(ep); 1701 fc_exch_release(ep);
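
The fc_exch change makes fc_invoke_resp() report whether the response handler actually ran, and the callers free the frame only when it did not, instead of the helper freeing it behind their backs. A tiny userspace model of that ownership rule (the frame and handler types are invented):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct frame { int len; };

typedef void (*resp_cb)(struct frame *fp, void *arg);

/* Returns true when the handler ran and therefore consumed the frame. */
static bool invoke_resp(resp_cb resp, struct frame *fp, void *arg)
{
	if (!resp)
		return false;
	resp(fp, arg);
	return true;
}

static void handle_frame(resp_cb resp, struct frame *fp, void *arg)
{
	/* exactly one owner: the caller frees iff nobody consumed it */
	if (!invoke_resp(resp, fp, arg))
		free(fp);
}

int main(void)
{
	struct frame *fp = malloc(sizeof(*fp));

	if (!fp)
		return 1;
	handle_frame(NULL, fp, NULL);	/* no handler: freed right here */
	printf("done\n");
	return 0;
}
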
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index c6795941b45d..2d5909c4685c 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -1039,11 +1039,26 @@ restart:
1039 fc_fcp_pkt_hold(fsp); 1039 fc_fcp_pkt_hold(fsp);
1040 spin_unlock_irqrestore(&si->scsi_queue_lock, flags); 1040 spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
1041 1041
1042 if (!fc_fcp_lock_pkt(fsp)) { 1042 spin_lock_bh(&fsp->scsi_pkt_lock);
1043 if (!(fsp->state & FC_SRB_COMPL)) {
1044 fsp->state |= FC_SRB_COMPL;
1045 /*
1046 * TODO: dropping scsi_pkt_lock and then reacquiring
1047 * again around fc_fcp_cleanup_cmd() is required,
1048 * since fc_fcp_cleanup_cmd() calls into
1049 * fc_seq_set_resp() and that func preempts cpu using
1050 * schedule. May be schedule and related code should be
1051 * removed instead of unlocking here to avoid scheduling
1052 * while atomic bug.
1053 */
1054 spin_unlock_bh(&fsp->scsi_pkt_lock);
1055
1043 fc_fcp_cleanup_cmd(fsp, error); 1056 fc_fcp_cleanup_cmd(fsp, error);
1057
1058 spin_lock_bh(&fsp->scsi_pkt_lock);
1044 fc_io_compl(fsp); 1059 fc_io_compl(fsp);
1045 fc_fcp_unlock_pkt(fsp);
1046 } 1060 }
1061 spin_unlock_bh(&fsp->scsi_pkt_lock);
1047 1062
1048 fc_fcp_pkt_release(fsp); 1063 fc_fcp_pkt_release(fsp);
1049 spin_lock_irqsave(&si->scsi_queue_lock, flags); 1064 spin_lock_irqsave(&si->scsi_queue_lock, flags);
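
The fc_fcp hunk replaces fc_fcp_lock_pkt() with open-coded locking so the packet can be claimed (FC_SRB_COMPL set) under scsi_pkt_lock, the lock dropped across fc_fcp_cleanup_cmd(), which may sleep via fc_seq_set_resp(), and then re-taken for fc_io_compl(). The general shape of that claim-then-drop pattern, with hypothetical my_pkt types and stubbed helpers:

#include <linux/spinlock.h>

#define PKT_COMPL	0x01	/* hypothetical "completed" state bit */

struct my_pkt {
	spinlock_t lock;
	unsigned int state;
};

static void cleanup_cmd(struct my_pkt *pkt, int error) { /* may sleep */ }
static void io_compl(struct my_pkt *pkt) { /* finishes under the lock */ }

static void complete_pkt(struct my_pkt *pkt, int error)
{
	spin_lock_bh(&pkt->lock);
	if (pkt->state & PKT_COMPL) {
		spin_unlock_bh(&pkt->lock);
		return;			/* someone else completed it */
	}
	pkt->state |= PKT_COMPL;	/* claim the packet first ... */
	spin_unlock_bh(&pkt->lock);

	cleanup_cmd(pkt, error);	/* ... then run the sleeping part */

	spin_lock_bh(&pkt->lock);
	io_compl(pkt);
	spin_unlock_bh(&pkt->lock);
}

Setting the state bit before dropping the lock is what keeps a racing path from completing the same packet twice in the unlocked window.
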
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 8053f24f0349..98d9bb6ff725 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -2941,10 +2941,10 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
2941{ 2941{
2942 struct iscsi_conn *conn = cls_conn->dd_data; 2942 struct iscsi_conn *conn = cls_conn->dd_data;
2943 struct iscsi_session *session = conn->session; 2943 struct iscsi_session *session = conn->session;
2944 unsigned long flags;
2945 2944
2946 del_timer_sync(&conn->transport_timer); 2945 del_timer_sync(&conn->transport_timer);
2947 2946
2947 mutex_lock(&session->eh_mutex);
2948 spin_lock_bh(&session->frwd_lock); 2948 spin_lock_bh(&session->frwd_lock);
2949 conn->c_stage = ISCSI_CONN_CLEANUP_WAIT; 2949 conn->c_stage = ISCSI_CONN_CLEANUP_WAIT;
2950 if (session->leadconn == conn) { 2950 if (session->leadconn == conn) {
@@ -2956,28 +2956,6 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
2956 } 2956 }
2957 spin_unlock_bh(&session->frwd_lock); 2957 spin_unlock_bh(&session->frwd_lock);
2958 2958
2959 /*
2960 * Block until all in-progress commands for this connection
2961 * time out or fail.
2962 */
2963 for (;;) {
2964 spin_lock_irqsave(session->host->host_lock, flags);
2965 if (!atomic_read(&session->host->host_busy)) { /* OK for ERL == 0 */
2966 spin_unlock_irqrestore(session->host->host_lock, flags);
2967 break;
2968 }
2969 spin_unlock_irqrestore(session->host->host_lock, flags);
2970 msleep_interruptible(500);
2971 iscsi_conn_printk(KERN_INFO, conn, "iscsi conn_destroy(): "
2972 "host_busy %d host_failed %d\n",
2973 atomic_read(&session->host->host_busy),
2974 session->host->host_failed);
2975 /*
2976 * force eh_abort() to unblock
2977 */
2978 wake_up(&conn->ehwait);
2979 }
2980
2981 /* flush queued up work because we free the connection below */ 2959 /* flush queued up work because we free the connection below */
2982 iscsi_suspend_tx(conn); 2960 iscsi_suspend_tx(conn);
2983 2961
@@ -2994,6 +2972,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
2994 if (session->leadconn == conn) 2972 if (session->leadconn == conn)
2995 session->leadconn = NULL; 2973 session->leadconn = NULL;
2996 spin_unlock_bh(&session->frwd_lock); 2974 spin_unlock_bh(&session->frwd_lock);
2975 mutex_unlock(&session->eh_mutex);
2997 2976
2998 iscsi_destroy_conn(cls_conn); 2977 iscsi_destroy_conn(cls_conn);
2999} 2978}
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 82b92c414a9c..437254e1c4de 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -738,7 +738,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
738 ql_log(ql_log_info, vha, 0x706f, 738 ql_log(ql_log_info, vha, 0x706f,
739 "Issuing MPI reset.\n"); 739 "Issuing MPI reset.\n");
740 740
741 if (IS_QLA83XX(ha)) { 741 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
742 uint32_t idc_control; 742 uint32_t idc_control;
743 743
744 qla83xx_idc_lock(vha, 0); 744 qla83xx_idc_lock(vha, 0);
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 0e6ee3ca30e6..8b011aef12bd 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -67,10 +67,10 @@
67 * | | | 0xd031-0xd0ff | 67 * | | | 0xd031-0xd0ff |
68 * | | | 0xd101-0xd1fe | 68 * | | | 0xd101-0xd1fe |
69 * | | | 0xd214-0xd2fe | 69 * | | | 0xd214-0xd2fe |
70 * | Target Mode | 0xe079 | | 70 * | Target Mode | 0xe080 | |
71 * | Target Mode Management | 0xf072 | 0xf002 | 71 * | Target Mode Management | 0xf096 | 0xf002 |
72 * | | | 0xf046-0xf049 | 72 * | | | 0xf046-0xf049 |
73 * | Target Mode Task Management | 0x1000b | | 73 * | Target Mode Task Management | 0x1000d | |
74 * ---------------------------------------------------------------------- 74 * ----------------------------------------------------------------------
75 */ 75 */
76 76
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index e86201d3b8c6..9ad819edcd67 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -274,6 +274,7 @@
274#define RESPONSE_ENTRY_CNT_FX00 256 /* Number of response entries.*/ 274#define RESPONSE_ENTRY_CNT_FX00 256 /* Number of response entries.*/
275 275
276struct req_que; 276struct req_que;
277struct qla_tgt_sess;
277 278
278/* 279/*
279 * (sd.h is not exported, hence local inclusion) 280 * (sd.h is not exported, hence local inclusion)
@@ -2026,6 +2027,7 @@ typedef struct fc_port {
2026 uint16_t port_id; 2027 uint16_t port_id;
2027 2028
2028 unsigned long retry_delay_timestamp; 2029 unsigned long retry_delay_timestamp;
2030 struct qla_tgt_sess *tgt_session;
2029} fc_port_t; 2031} fc_port_t;
2030 2032
2031#include "qla_mr.h" 2033#include "qla_mr.h"
@@ -3154,13 +3156,13 @@ struct qla_hw_data {
3154/* Bit 21 of fw_attributes decides the MCTP capabilities */ 3156/* Bit 21 of fw_attributes decides the MCTP capabilities */
3155#define IS_MCTP_CAPABLE(ha) (IS_QLA2031(ha) && \ 3157#define IS_MCTP_CAPABLE(ha) (IS_QLA2031(ha) && \
3156 ((ha)->fw_attributes_ext[0] & BIT_0)) 3158 ((ha)->fw_attributes_ext[0] & BIT_0))
3157#define IS_PI_UNINIT_CAPABLE(ha) (IS_QLA83XX(ha)) 3159#define IS_PI_UNINIT_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
3158#define IS_PI_IPGUARD_CAPABLE(ha) (IS_QLA83XX(ha)) 3160#define IS_PI_IPGUARD_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
3159#define IS_PI_DIFB_DIX0_CAPABLE(ha) (0) 3161#define IS_PI_DIFB_DIX0_CAPABLE(ha) (0)
3160#define IS_PI_SPLIT_DET_CAPABLE_HBA(ha) (IS_QLA83XX(ha)) 3162#define IS_PI_SPLIT_DET_CAPABLE_HBA(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
3161#define IS_PI_SPLIT_DET_CAPABLE(ha) (IS_PI_SPLIT_DET_CAPABLE_HBA(ha) && \ 3163#define IS_PI_SPLIT_DET_CAPABLE(ha) (IS_PI_SPLIT_DET_CAPABLE_HBA(ha) && \
3162 (((ha)->fw_attributes_h << 16 | (ha)->fw_attributes) & BIT_22)) 3164 (((ha)->fw_attributes_h << 16 | (ha)->fw_attributes) & BIT_22))
3163#define IS_ATIO_MSIX_CAPABLE(ha) (IS_QLA83XX(ha)) 3165#define IS_ATIO_MSIX_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
3164#define IS_TGT_MODE_CAPABLE(ha) (ha->tgt.atio_q_length) 3166#define IS_TGT_MODE_CAPABLE(ha) (ha->tgt.atio_q_length)
3165#define IS_SHADOW_REG_CAPABLE(ha) (IS_QLA27XX(ha)) 3167#define IS_SHADOW_REG_CAPABLE(ha) (IS_QLA27XX(ha))
3166#define IS_DPORT_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha)) 3168#define IS_DPORT_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
@@ -3579,6 +3581,16 @@ typedef struct scsi_qla_host {
3579 uint16_t fcoe_fcf_idx; 3581 uint16_t fcoe_fcf_idx;
3580 uint8_t fcoe_vn_port_mac[6]; 3582 uint8_t fcoe_vn_port_mac[6];
3581 3583
3584 /* list of commands waiting on workqueue */
3585 struct list_head qla_cmd_list;
3586 struct list_head qla_sess_op_cmd_list;
3587 spinlock_t cmd_list_lock;
3588
3589 /* Counter to detect races between ELS and RSCN events */
3590 atomic_t generation_tick;
3591 /* Time when global fcport update has been scheduled */
3592 int total_fcport_update_gen;
3593
3582 uint32_t vp_abort_cnt; 3594 uint32_t vp_abort_cnt;
3583 3595
3584 struct fc_vport *fc_vport; /* holds fc_vport * for each vport */ 3596 struct fc_vport *fc_vport; /* holds fc_vport * for each vport */
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 664013115c9d..11f2f3279eab 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -115,6 +115,8 @@ qla2x00_async_iocb_timeout(void *data)
115 QLA_LOGIO_LOGIN_RETRIED : 0; 115 QLA_LOGIO_LOGIN_RETRIED : 0;
116 qla2x00_post_async_login_done_work(fcport->vha, fcport, 116 qla2x00_post_async_login_done_work(fcport->vha, fcport,
117 lio->u.logio.data); 117 lio->u.logio.data);
118 } else if (sp->type == SRB_LOGOUT_CMD) {
119 qlt_logo_completion_handler(fcport, QLA_FUNCTION_TIMEOUT);
118 } 120 }
119} 121}
120 122
@@ -497,7 +499,10 @@ void
497qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport, 499qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport,
498 uint16_t *data) 500 uint16_t *data)
499{ 501{
500 qla2x00_mark_device_lost(vha, fcport, 1, 0); 502 /* Don't re-login in target mode */
503 if (!fcport->tgt_session)
504 qla2x00_mark_device_lost(vha, fcport, 1, 0);
505 qlt_logo_completion_handler(fcport, data[0]);
501 return; 506 return;
502} 507}
503 508
@@ -1538,7 +1543,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
1538 mem_size = (ha->fw_memory_size - 0x11000 + 1) * 1543 mem_size = (ha->fw_memory_size - 0x11000 + 1) *
1539 sizeof(uint16_t); 1544 sizeof(uint16_t);
1540 } else if (IS_FWI2_CAPABLE(ha)) { 1545 } else if (IS_FWI2_CAPABLE(ha)) {
1541 if (IS_QLA83XX(ha)) 1546 if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
1542 fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem); 1547 fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
1543 else if (IS_QLA81XX(ha)) 1548 else if (IS_QLA81XX(ha))
1544 fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem); 1549 fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
@@ -1550,7 +1555,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
1550 mem_size = (ha->fw_memory_size - 0x100000 + 1) * 1555 mem_size = (ha->fw_memory_size - 0x100000 + 1) *
1551 sizeof(uint32_t); 1556 sizeof(uint32_t);
1552 if (ha->mqenable) { 1557 if (ha->mqenable) {
1553 if (!IS_QLA83XX(ha)) 1558 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
1554 mq_size = sizeof(struct qla2xxx_mq_chain); 1559 mq_size = sizeof(struct qla2xxx_mq_chain);
1555 /* 1560 /*
1556 * Allocate maximum buffer size for all queues. 1561 * Allocate maximum buffer size for all queues.
@@ -2922,21 +2927,14 @@ qla2x00_rport_del(void *data)
2922{ 2927{
2923 fc_port_t *fcport = data; 2928 fc_port_t *fcport = data;
2924 struct fc_rport *rport; 2929 struct fc_rport *rport;
2925 scsi_qla_host_t *vha = fcport->vha;
2926 unsigned long flags; 2930 unsigned long flags;
2927 2931
2928 spin_lock_irqsave(fcport->vha->host->host_lock, flags); 2932 spin_lock_irqsave(fcport->vha->host->host_lock, flags);
2929 rport = fcport->drport ? fcport->drport: fcport->rport; 2933 rport = fcport->drport ? fcport->drport: fcport->rport;
2930 fcport->drport = NULL; 2934 fcport->drport = NULL;
2931 spin_unlock_irqrestore(fcport->vha->host->host_lock, flags); 2935 spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
2932 if (rport) { 2936 if (rport)
2933 fc_remote_port_delete(rport); 2937 fc_remote_port_delete(rport);
2934 /*
2935 * Release the target mode FC NEXUS in qla_target.c code
2936 * if target mod is enabled.
2937 */
2938 qlt_fc_port_deleted(vha, fcport);
2939 }
2940} 2938}
2941 2939
2942/** 2940/**
@@ -3303,6 +3301,7 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
3303 * Create target mode FC NEXUS in qla_target.c if target mode is 3301 * Create target mode FC NEXUS in qla_target.c if target mode is
3304 * enabled.. 3302 * enabled..
3305 */ 3303 */
3304
3306 qlt_fc_port_added(vha, fcport); 3305 qlt_fc_port_added(vha, fcport);
3307 3306
3308 spin_lock_irqsave(fcport->vha->host->host_lock, flags); 3307 spin_lock_irqsave(fcport->vha->host->host_lock, flags);
@@ -3341,8 +3340,7 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
3341 3340
3342 if (IS_QLAFX00(vha->hw)) { 3341 if (IS_QLAFX00(vha->hw)) {
3343 qla2x00_set_fcport_state(fcport, FCS_ONLINE); 3342 qla2x00_set_fcport_state(fcport, FCS_ONLINE);
3344 qla2x00_reg_remote_port(vha, fcport); 3343 goto reg_port;
3345 return;
3346 } 3344 }
3347 fcport->login_retry = 0; 3345 fcport->login_retry = 0;
3348 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); 3346 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
@@ -3350,7 +3348,16 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
3350 qla2x00_set_fcport_state(fcport, FCS_ONLINE); 3348 qla2x00_set_fcport_state(fcport, FCS_ONLINE);
3351 qla2x00_iidma_fcport(vha, fcport); 3349 qla2x00_iidma_fcport(vha, fcport);
3352 qla24xx_update_fcport_fcp_prio(vha, fcport); 3350 qla24xx_update_fcport_fcp_prio(vha, fcport);
3353 qla2x00_reg_remote_port(vha, fcport); 3351
3352reg_port:
3353 if (qla_ini_mode_enabled(vha))
3354 qla2x00_reg_remote_port(vha, fcport);
3355 else {
3356 /*
3357 * Create target mode FC NEXUS in qla_target.c
3358 */
3359 qlt_fc_port_added(vha, fcport);
3360 }
3354} 3361}
3355 3362
3356/* 3363/*
@@ -3375,6 +3382,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
3375 LIST_HEAD(new_fcports); 3382 LIST_HEAD(new_fcports);
3376 struct qla_hw_data *ha = vha->hw; 3383 struct qla_hw_data *ha = vha->hw;
3377 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 3384 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
3385 int discovery_gen;
3378 3386
3379 /* If FL port exists, then SNS is present */ 3387 /* If FL port exists, then SNS is present */
3380 if (IS_FWI2_CAPABLE(ha)) 3388 if (IS_FWI2_CAPABLE(ha))
@@ -3445,6 +3453,14 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
3445 fcport->scan_state = QLA_FCPORT_SCAN; 3453 fcport->scan_state = QLA_FCPORT_SCAN;
3446 } 3454 }
3447 3455
3456 /* Mark the time right before querying FW for connected ports.
3457 * This process is long, asynchronous and by the time it's done,
3458 * collected information might not be accurate anymore. E.g.
3459 * disconnected port might have re-connected and a brand new
3460 * session has been created. In this case session's generation
3461 * will be newer than discovery_gen. */
3462 qlt_do_generation_tick(vha, &discovery_gen);
3463
3448 rval = qla2x00_find_all_fabric_devs(vha, &new_fcports); 3464 rval = qla2x00_find_all_fabric_devs(vha, &new_fcports);
3449 if (rval != QLA_SUCCESS) 3465 if (rval != QLA_SUCCESS)
3450 break; 3466 break;
@@ -3460,20 +3476,44 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
3460 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) 3476 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
3461 continue; 3477 continue;
3462 3478
3463 if (fcport->scan_state == QLA_FCPORT_SCAN &&
3464 atomic_read(&fcport->state) == FCS_ONLINE) {
3465 qla2x00_mark_device_lost(vha, fcport,
3466 ql2xplogiabsentdevice, 0);
3467 if (fcport->loop_id != FC_NO_LOOP_ID &&
3468 (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
3469 fcport->port_type != FCT_INITIATOR &&
3470 fcport->port_type != FCT_BROADCAST) {
3471 ha->isp_ops->fabric_logout(vha,
3472 fcport->loop_id,
3473 fcport->d_id.b.domain,
3474 fcport->d_id.b.area,
3475 fcport->d_id.b.al_pa);
3476 qla2x00_clear_loop_id(fcport);
3479 if (fcport->scan_state == QLA_FCPORT_SCAN) {
3480 if (qla_ini_mode_enabled(base_vha) &&
3481 atomic_read(&fcport->state) == FCS_ONLINE) {
3482 qla2x00_mark_device_lost(vha, fcport,
3483 ql2xplogiabsentdevice, 0);
3484 if (fcport->loop_id != FC_NO_LOOP_ID &&
3485 (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
3486 fcport->port_type != FCT_INITIATOR &&
3487 fcport->port_type != FCT_BROADCAST) {
3488 ha->isp_ops->fabric_logout(vha,
3489 fcport->loop_id,
3490 fcport->d_id.b.domain,
3491 fcport->d_id.b.area,
3492 fcport->d_id.b.al_pa);
3493 qla2x00_clear_loop_id(fcport);
3494 }
3495 } else if (!qla_ini_mode_enabled(base_vha)) {
3496 /*
3497 * In target mode, explicitly kill
3498 * sessions and log out of devices
3499 * that are gone, so that we don't
3500 * end up with an initiator using the
3501 * wrong ACL (if the fabric recycles
3502 * an FC address and we have a stale
3503 * session around) and so that we don't
3504 * report initiators that are no longer
3505 * on the fabric.
3506 */
3507 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf077,
3508 "port gone, logging out/killing session: "
3509 "%8phC state 0x%x flags 0x%x fc4_type 0x%x "
3510 "scan_state %d\n",
3511 fcport->port_name,
3512 atomic_read(&fcport->state),
3513 fcport->flags, fcport->fc4_type,
3514 fcport->scan_state);
3515 qlt_fc_port_deleted(vha, fcport,
3516 discovery_gen);
3477 } 3517 }
3478 } 3518 }
3479 } 3519 }
@@ -3494,6 +3534,28 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
3494 (fcport->flags & FCF_LOGIN_NEEDED) == 0) 3534 (fcport->flags & FCF_LOGIN_NEEDED) == 0)
3495 continue; 3535 continue;
3496 3536
3537 /*
3538 * If we're not an initiator, skip looking for devices
3539 * and logging in. There's no reason for us to do it,
3540 * and it seems to actively cause problems in target
3541 * mode if we race with the initiator logging into us
3542 * (we might get the "port ID used" status back from
3543 * our login command and log out the initiator, which
3544 * seems to cause havoc).
3545 */
3546 if (!qla_ini_mode_enabled(base_vha)) {
3547 if (fcport->scan_state == QLA_FCPORT_FOUND) {
3548 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf078,
3549 "port %8phC state 0x%x flags 0x%x fc4_type 0x%x "
3550 "scan_state %d (initiator mode disabled; skipping "
3551 "login)\n", fcport->port_name,
3552 atomic_read(&fcport->state),
3553 fcport->flags, fcport->fc4_type,
3554 fcport->scan_state);
3555 }
3556 continue;
3557 }
3558
3497 if (fcport->loop_id == FC_NO_LOOP_ID) { 3559 if (fcport->loop_id == FC_NO_LOOP_ID) {
3498 fcport->loop_id = next_loopid; 3560 fcport->loop_id = next_loopid;
3499 rval = qla2x00_find_new_loop_id( 3561 rval = qla2x00_find_new_loop_id(
@@ -3520,16 +3582,38 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
3520 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) 3582 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
3521 break; 3583 break;
3522 3584
3523 /* Find a new loop ID to use. */ 3585 /*
3524 fcport->loop_id = next_loopid; 3586 * If we're not an initiator, skip looking for devices
3525 rval = qla2x00_find_new_loop_id(base_vha, fcport); 3587 * and logging in. There's no reason for us to do it,
3526 if (rval != QLA_SUCCESS) { 3588 * and it seems to actively cause problems in target
3527 /* Ran out of IDs to use */ 3589 * mode if we race with the initiator logging into us
3528 break; 3590 * (we might get the "port ID used" status back from
3529 } 3591 * our login command and log out the initiator, which
3592 * seems to cause havoc).
3593 */
3594 if (qla_ini_mode_enabled(base_vha)) {
3595 /* Find a new loop ID to use. */
3596 fcport->loop_id = next_loopid;
3597 rval = qla2x00_find_new_loop_id(base_vha,
3598 fcport);
3599 if (rval != QLA_SUCCESS) {
3600 /* Ran out of IDs to use */
3601 break;
3602 }
3530 3603
3531 /* Login and update database */ 3604 /* Login and update database */
3532 qla2x00_fabric_dev_login(vha, fcport, &next_loopid); 3605 qla2x00_fabric_dev_login(vha, fcport,
3606 &next_loopid);
3607 } else {
3608 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf079,
3609 "new port %8phC state 0x%x flags 0x%x fc4_type "
3610 "0x%x scan_state %d (initiator mode disabled; "
3611 "skipping login)\n",
3612 fcport->port_name,
3613 atomic_read(&fcport->state),
3614 fcport->flags, fcport->fc4_type,
3615 fcport->scan_state);
3616 }
3533 3617
3534 list_move_tail(&fcport->list, &vha->vp_fcports); 3618 list_move_tail(&fcport->list, &vha->vp_fcports);
3535 } 3619 }
@@ -3725,11 +3809,12 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3725 fcport->fp_speed = new_fcport->fp_speed; 3809 fcport->fp_speed = new_fcport->fp_speed;
3726 3810
3727 /* 3811 /*
3728 * If address the same and state FCS_ONLINE, nothing 3812 * If address the same and state FCS_ONLINE
3729 * changed. 3813 * (or in target mode), nothing changed.
3730 */ 3814 */
3731 if (fcport->d_id.b24 == new_fcport->d_id.b24 && 3815 if (fcport->d_id.b24 == new_fcport->d_id.b24 &&
3732 atomic_read(&fcport->state) == FCS_ONLINE) { 3816 (atomic_read(&fcport->state) == FCS_ONLINE ||
3817 !qla_ini_mode_enabled(base_vha))) {
3733 break; 3818 break;
3734 } 3819 }
3735 3820
@@ -3749,6 +3834,22 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3749 * Log it out if still logged in and mark it for 3834 * Log it out if still logged in and mark it for
3750 * relogin later. 3835 * relogin later.
3751 */ 3836 */
3837 if (!qla_ini_mode_enabled(base_vha)) {
3838 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf080,
3839 "port changed FC ID, %8phC"
3840 " old %x:%x:%x (loop_id 0x%04x)-> new %x:%x:%x\n",
3841 fcport->port_name,
3842 fcport->d_id.b.domain,
3843 fcport->d_id.b.area,
3844 fcport->d_id.b.al_pa,
3845 fcport->loop_id,
3846 new_fcport->d_id.b.domain,
3847 new_fcport->d_id.b.area,
3848 new_fcport->d_id.b.al_pa);
3849 fcport->d_id.b24 = new_fcport->d_id.b24;
3850 break;
3851 }
3852
3752 fcport->d_id.b24 = new_fcport->d_id.b24; 3853 fcport->d_id.b24 = new_fcport->d_id.b24;
3753 fcport->flags |= FCF_LOGIN_NEEDED; 3854 fcport->flags |= FCF_LOGIN_NEEDED;
3754 if (fcport->loop_id != FC_NO_LOOP_ID && 3855 if (fcport->loop_id != FC_NO_LOOP_ID &&
@@ -3768,6 +3869,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3768 if (found) 3869 if (found)
3769 continue; 3870 continue;
3770 /* If device was not in our fcports list, then add it. */ 3871 /* If device was not in our fcports list, then add it. */
3872 new_fcport->scan_state = QLA_FCPORT_FOUND;
3771 list_add_tail(&new_fcport->list, new_fcports); 3873 list_add_tail(&new_fcport->list, new_fcports);
3772 3874
3773 /* Allocate a new replacement fcport. */ 3875 /* Allocate a new replacement fcport. */
@@ -4188,6 +4290,14 @@ qla2x00_update_fcports(scsi_qla_host_t *base_vha)
4188 atomic_read(&fcport->state) != FCS_UNCONFIGURED) { 4290 atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
4189 spin_unlock_irqrestore(&ha->vport_slock, flags); 4291 spin_unlock_irqrestore(&ha->vport_slock, flags);
4190 qla2x00_rport_del(fcport); 4292 qla2x00_rport_del(fcport);
4293
4294 /*
4295 * Release the target mode FC NEXUS in
4296 * qla_target.c, if target mod is enabled.
4297 */
4298 qlt_fc_port_deleted(vha, fcport,
4299 base_vha->total_fcport_update_gen);
4300
4191 spin_lock_irqsave(&ha->vport_slock, flags); 4301 spin_lock_irqsave(&ha->vport_slock, flags);
4192 } 4302 }
4193 } 4303 }
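
The qla_init changes snapshot a generation counter (qlt_do_generation_tick()) right before the long, asynchronous fabric scan and later compare it against the generation stamped on each session, so results from a scan that raced with a re-login are discarded. The comparison boils down to something like this runnable C11 sketch:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int generation_tick;

struct session { int generation; };

/* Snapshot the generation right before starting a long scan. */
static void start_scan(int *discovery_gen)
{
	*discovery_gen = atomic_fetch_add(&generation_tick, 1) + 1;
}

/* A session created or refreshed by a later event gets a newer generation. */
static void session_created(struct session *s)
{
	s->generation = atomic_fetch_add(&generation_tick, 1) + 1;
}

/* Only act on scan results that predate the session's last change. */
static bool scan_result_is_stale(const struct session *s, int discovery_gen)
{
	return s->generation > discovery_gen;
}

int main(void)
{
	struct session s;
	int discovery_gen;

	start_scan(&discovery_gen);
	session_created(&s);	/* port re-logged in during the scan */
	printf("stale: %d\n", scan_result_is_stale(&s, discovery_gen));
	return 0;
}

atomic_fetch_add() returning the previous value plus one mirrors the kernel's atomic_inc_return(); the driver additionally issues a wmb() so the new generation is visible before any dependent state is read.
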
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 36fbd4c7af8f..6f02b26a35cf 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -1943,6 +1943,9 @@ qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1943 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; 1943 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1944 logio->control_flags = 1944 logio->control_flags =
1945 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO); 1945 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
1946 if (!sp->fcport->tgt_session ||
1947 !sp->fcport->tgt_session->keep_nport_handle)
1948 logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT);
1946 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); 1949 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1947 logio->port_id[0] = sp->fcport->d_id.b.al_pa; 1950 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1948 logio->port_id[1] = sp->fcport->d_id.b.area; 1951 logio->port_id[1] = sp->fcport->d_id.b.area;
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 02b1c1c5355b..b2f713ad9034 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -2415,7 +2415,8 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
2415 *orig_iocb_cnt = mcp->mb[10]; 2415 *orig_iocb_cnt = mcp->mb[10];
2416 if (vha->hw->flags.npiv_supported && max_npiv_vports) 2416 if (vha->hw->flags.npiv_supported && max_npiv_vports)
2417 *max_npiv_vports = mcp->mb[11]; 2417 *max_npiv_vports = mcp->mb[11];
2418 if ((IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw)) && max_fcfs) 2418 if ((IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw) ||
2419 IS_QLA27XX(vha->hw)) && max_fcfs)
2419 *max_fcfs = mcp->mb[12]; 2420 *max_fcfs = mcp->mb[12];
2420 } 2421 }
2421 2422
@@ -3898,7 +3899,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
3898 spin_lock_irqsave(&ha->hardware_lock, flags); 3899 spin_lock_irqsave(&ha->hardware_lock, flags);
3899 if (!(rsp->options & BIT_0)) { 3900 if (!(rsp->options & BIT_0)) {
3900 WRT_REG_DWORD(rsp->rsp_q_out, 0); 3901 WRT_REG_DWORD(rsp->rsp_q_out, 0);
3901 if (!IS_QLA83XX(ha)) 3902 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
3902 WRT_REG_DWORD(rsp->rsp_q_in, 0); 3903 WRT_REG_DWORD(rsp->rsp_q_in, 0);
3903 } 3904 }
3904 3905
@@ -5345,7 +5346,7 @@ qla83xx_restart_nic_firmware(scsi_qla_host_t *vha)
5345 mbx_cmd_t *mcp = &mc; 5346 mbx_cmd_t *mcp = &mc;
5346 struct qla_hw_data *ha = vha->hw; 5347 struct qla_hw_data *ha = vha->hw;
5347 5348
5348 if (!IS_QLA83XX(ha)) 5349 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
5349 return QLA_FUNCTION_FAILED; 5350 return QLA_FUNCTION_FAILED;
5350 5351
5351 ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__); 5352 ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__);
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index a28815b8276f..8a5cac8448c7 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -2504,6 +2504,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2504 ha->mbx_count = MAILBOX_REGISTER_COUNT; 2504 ha->mbx_count = MAILBOX_REGISTER_COUNT;
2505 req_length = REQUEST_ENTRY_CNT_24XX; 2505 req_length = REQUEST_ENTRY_CNT_24XX;
2506 rsp_length = RESPONSE_ENTRY_CNT_2300; 2506 rsp_length = RESPONSE_ENTRY_CNT_2300;
2507 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
2507 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 2508 ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
2508 ha->init_cb_size = sizeof(struct mid_init_cb_81xx); 2509 ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
2509 ha->gid_list_info_size = 8; 2510 ha->gid_list_info_size = 8;
@@ -3229,11 +3230,15 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
3229 spin_lock_irqsave(vha->host->host_lock, flags); 3230 spin_lock_irqsave(vha->host->host_lock, flags);
3230 fcport->drport = rport; 3231 fcport->drport = rport;
3231 spin_unlock_irqrestore(vha->host->host_lock, flags); 3232 spin_unlock_irqrestore(vha->host->host_lock, flags);
3233 qlt_do_generation_tick(vha, &base_vha->total_fcport_update_gen);
3232 set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags); 3234 set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
3233 qla2xxx_wake_dpc(base_vha); 3235 qla2xxx_wake_dpc(base_vha);
3234 } else { 3236 } else {
3235 fc_remote_port_delete(rport); 3237 int now;
3236 qlt_fc_port_deleted(vha, fcport); 3238 if (rport)
3239 fc_remote_port_delete(rport);
3240 qlt_do_generation_tick(vha, &now);
3241 qlt_fc_port_deleted(vha, fcport, now);
3237 } 3242 }
3238} 3243}
3239 3244
@@ -3763,8 +3768,11 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
3763 INIT_LIST_HEAD(&vha->vp_fcports); 3768 INIT_LIST_HEAD(&vha->vp_fcports);
3764 INIT_LIST_HEAD(&vha->work_list); 3769 INIT_LIST_HEAD(&vha->work_list);
3765 INIT_LIST_HEAD(&vha->list); 3770 INIT_LIST_HEAD(&vha->list);
3771 INIT_LIST_HEAD(&vha->qla_cmd_list);
3772 INIT_LIST_HEAD(&vha->qla_sess_op_cmd_list);
3766 3773
3767 spin_lock_init(&vha->work_lock); 3774 spin_lock_init(&vha->work_lock);
3775 spin_lock_init(&vha->cmd_list_lock);
3768 3776
3769 sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no); 3777 sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no);
3770 ql_dbg(ql_dbg_init, vha, 0x0041, 3778 ql_dbg(ql_dbg_init, vha, 0x0041,
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 028e8c8a7de9..2feb5f38edcd 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -1697,7 +1697,7 @@ qla83xx_select_led_port(struct qla_hw_data *ha)
1697{ 1697{
1698 uint32_t led_select_value = 0; 1698 uint32_t led_select_value = 0;
1699 1699
1700 if (!IS_QLA83XX(ha)) 1700 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
1701 goto out; 1701 goto out;
1702 1702
1703 if (ha->port_no == 0) 1703 if (ha->port_no == 0)
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index b749026aa592..58651ecbd88c 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -113,6 +113,11 @@ static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha,
113static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha, 113static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
114 struct atio_from_isp *atio, uint16_t status, int qfull); 114 struct atio_from_isp *atio, uint16_t status, int qfull);
115static void qlt_disable_vha(struct scsi_qla_host *vha); 115static void qlt_disable_vha(struct scsi_qla_host *vha);
116static void qlt_clear_tgt_db(struct qla_tgt *tgt);
117static void qlt_send_notify_ack(struct scsi_qla_host *vha,
118 struct imm_ntfy_from_isp *ntfy,
119 uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
120 uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan);
116/* 121/*
117 * Global Variables 122 * Global Variables
118 */ 123 */
@@ -122,6 +127,16 @@ static struct workqueue_struct *qla_tgt_wq;
122static DEFINE_MUTEX(qla_tgt_mutex); 127static DEFINE_MUTEX(qla_tgt_mutex);
123static LIST_HEAD(qla_tgt_glist); 128static LIST_HEAD(qla_tgt_glist);
124 129
130/* This API intentionally takes dest as a parameter, rather than returning
131 * int value to avoid caller forgetting to issue wmb() after the store */
132void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest)
133{
134 scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);
135 *dest = atomic_inc_return(&base_vha->generation_tick);
136 /* memory barrier */
137 wmb();
138}
139
125/* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */ 140/* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
126static struct qla_tgt_sess *qlt_find_sess_by_port_name( 141static struct qla_tgt_sess *qlt_find_sess_by_port_name(
127 struct qla_tgt *tgt, 142 struct qla_tgt *tgt,
@@ -381,14 +396,73 @@ static void qlt_free_session_done(struct work_struct *work)
381 struct qla_tgt *tgt = sess->tgt; 396 struct qla_tgt *tgt = sess->tgt;
382 struct scsi_qla_host *vha = sess->vha; 397 struct scsi_qla_host *vha = sess->vha;
383 struct qla_hw_data *ha = vha->hw; 398 struct qla_hw_data *ha = vha->hw;
399 unsigned long flags;
400 bool logout_started = false;
401 fc_port_t fcport;
402
403 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf084,
404 "%s: se_sess %p / sess %p from port %8phC loop_id %#04x"
405 " s_id %02x:%02x:%02x logout %d keep %d plogi %d\n",
406 __func__, sess->se_sess, sess, sess->port_name, sess->loop_id,
407 sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa,
408 sess->logout_on_delete, sess->keep_nport_handle,
409 sess->plogi_ack_needed);
384 410
385 BUG_ON(!tgt); 411 BUG_ON(!tgt);
412
413 if (sess->logout_on_delete) {
414 int rc;
415
416 memset(&fcport, 0, sizeof(fcport));
417 fcport.loop_id = sess->loop_id;
418 fcport.d_id = sess->s_id;
419 memcpy(fcport.port_name, sess->port_name, WWN_SIZE);
420 fcport.vha = vha;
421 fcport.tgt_session = sess;
422
423 rc = qla2x00_post_async_logout_work(vha, &fcport, NULL);
424 if (rc != QLA_SUCCESS)
425 ql_log(ql_log_warn, vha, 0xf085,
426 "Schedule logo failed sess %p rc %d\n",
427 sess, rc);
428 else
429 logout_started = true;
430 }
431
386 /* 432 /*
387 * Release the target session for FC Nexus from fabric module code. 433 * Release the target session for FC Nexus from fabric module code.
388 */ 434 */
389 if (sess->se_sess != NULL) 435 if (sess->se_sess != NULL)
390 ha->tgt.tgt_ops->free_session(sess); 436 ha->tgt.tgt_ops->free_session(sess);
391 437
438 if (logout_started) {
439 bool traced = false;
440
441 while (!ACCESS_ONCE(sess->logout_completed)) {
442 if (!traced) {
443 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf086,
444 "%s: waiting for sess %p logout\n",
445 __func__, sess);
446 traced = true;
447 }
448 msleep(100);
449 }
450
451 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf087,
452 "%s: sess %p logout completed\n",
453 __func__, sess);
454 }
455
456 spin_lock_irqsave(&ha->hardware_lock, flags);
457
458 if (sess->plogi_ack_needed)
459 qlt_send_notify_ack(vha, &sess->tm_iocb,
460 0, 0, 0, 0, 0, 0);
461
462 list_del(&sess->sess_list_entry);
463
464 spin_unlock_irqrestore(&ha->hardware_lock, flags);
465
392 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001, 466 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
393 "Unregistration of sess %p finished\n", sess); 467 "Unregistration of sess %p finished\n", sess);
394 468
@@ -409,9 +483,9 @@ void qlt_unreg_sess(struct qla_tgt_sess *sess)
409 483
410 vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess); 484 vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
411 485
412 list_del(&sess->sess_list_entry); 486 if (!list_empty(&sess->del_list_entry))
413 if (sess->deleted) 487 list_del_init(&sess->del_list_entry);
414 list_del(&sess->del_list_entry); 488 sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
415 489
416 INIT_WORK(&sess->free_work, qlt_free_session_done); 490 INIT_WORK(&sess->free_work, qlt_free_session_done);
417 schedule_work(&sess->free_work); 491 schedule_work(&sess->free_work);
@@ -431,10 +505,10 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
431 505
432 loop_id = le16_to_cpu(n->u.isp24.nport_handle); 506 loop_id = le16_to_cpu(n->u.isp24.nport_handle);
433 if (loop_id == 0xFFFF) { 507 if (loop_id == 0xFFFF) {
434#if 0 /* FIXME: Re-enable Global event handling.. */
435 /* Global event */ 508 /* Global event */
436 atomic_inc(&ha->tgt.qla_tgt->tgt_global_resets_count); 509 atomic_inc(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
437 qlt_clear_tgt_db(ha->tgt.qla_tgt); 510 qlt_clear_tgt_db(vha->vha_tgt.qla_tgt);
511#if 0 /* FIXME: do we need to choose a session here? */
438 if (!list_empty(&ha->tgt.qla_tgt->sess_list)) { 512 if (!list_empty(&ha->tgt.qla_tgt->sess_list)) {
439 sess = list_entry(ha->tgt.qla_tgt->sess_list.next, 513 sess = list_entry(ha->tgt.qla_tgt->sess_list.next,
440 typeof(*sess), sess_list_entry); 514 typeof(*sess), sess_list_entry);
@@ -489,27 +563,38 @@ static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
489 struct qla_tgt *tgt = sess->tgt; 563 struct qla_tgt *tgt = sess->tgt;
490 uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5; 564 uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5;
491 565
492 if (sess->deleted) 566 if (sess->deleted) {
493 return; 567 /* Upgrade to unconditional deletion in case it was temporary */
568 if (immediate && sess->deleted == QLA_SESS_DELETION_PENDING)
569 list_del(&sess->del_list_entry);
570 else
571 return;
572 }
494 573
495 ql_dbg(ql_dbg_tgt, sess->vha, 0xe001, 574 ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
496 "Scheduling sess %p for deletion\n", sess); 575 "Scheduling sess %p for deletion\n", sess);
497 list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
498 sess->deleted = 1;
499 576
500 if (immediate) 577 if (immediate) {
501 dev_loss_tmo = 0; 578 dev_loss_tmo = 0;
579 sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
580 list_add(&sess->del_list_entry, &tgt->del_sess_list);
581 } else {
582 sess->deleted = QLA_SESS_DELETION_PENDING;
583 list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
584 }
502 585
503 sess->expires = jiffies + dev_loss_tmo * HZ; 586 sess->expires = jiffies + dev_loss_tmo * HZ;
504 587
505 ql_dbg(ql_dbg_tgt, sess->vha, 0xe048, 588 ql_dbg(ql_dbg_tgt, sess->vha, 0xe048,
506 "qla_target(%d): session for port %8phC (loop ID %d) scheduled for " 589 "qla_target(%d): session for port %8phC (loop ID %d s_id %02x:%02x:%02x)"
507 "deletion in %u secs (expires: %lu) immed: %d\n", 590 " scheduled for deletion in %u secs (expires: %lu) immed: %d, logout: %d, gen: %#x\n",
508 sess->vha->vp_idx, sess->port_name, sess->loop_id, dev_loss_tmo, 591 sess->vha->vp_idx, sess->port_name, sess->loop_id,
509 sess->expires, immediate); 592 sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa,
593 dev_loss_tmo, sess->expires, immediate, sess->logout_on_delete,
594 sess->generation);
510 595
511 if (immediate) 596 if (immediate)
512 schedule_delayed_work(&tgt->sess_del_work, 0); 597 mod_delayed_work(system_wq, &tgt->sess_del_work, 0);
513 else 598 else
514 schedule_delayed_work(&tgt->sess_del_work, 599 schedule_delayed_work(&tgt->sess_del_work,
515 sess->expires - jiffies); 600 sess->expires - jiffies);
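
For orientation, the hunk above reduces session deletion to a small state machine: a delayed request marks the session QLA_SESS_DELETION_PENDING and queues it at the tail of del_sess_list with the dev_loss timeout, while an immediate request marks it QLA_SESS_DELETION_IN_PROGRESS, queues it at the head, and kicks the work right away; an immediate request may also upgrade an already-PENDING session. The snippet below is a minimal userspace sketch of that state machine only, reusing the enum names introduced in the qla_target.h hunk further down; it is an illustration, not driver code, and omits locking, the list itself, and the delayed-work plumbing.

/*
 * Minimal userspace sketch of the session-deletion state machine used
 * above.  Illustration only: locking, the deletion list and the actual
 * delayed-work machinery are left out.
 */
#include <stdio.h>

enum qla_sess_deletion {
	QLA_SESS_DELETION_NONE = 0,
	QLA_SESS_DELETION_PENDING = 1,
	QLA_SESS_DELETION_IN_PROGRESS = 2,
};

struct sess_model {
	enum qla_sess_deletion deleted;
	unsigned int delay_secs;	/* stands in for dev_loss_tmo */
};

/* Mirrors the decision in qlt_schedule_sess_for_deletion(): immediate
 * requests go to IN_PROGRESS with no delay and may upgrade a PENDING
 * entry; delayed requests become PENDING and keep the timeout. */
static void schedule_for_deletion(struct sess_model *s, int immediate,
				  unsigned int dev_loss_tmo)
{
	if (s->deleted) {
		/* only a PENDING entry can be upgraded by an immediate request */
		if (!(immediate && s->deleted == QLA_SESS_DELETION_PENDING))
			return;
	}

	if (immediate) {
		s->deleted = QLA_SESS_DELETION_IN_PROGRESS;
		s->delay_secs = 0;
	} else {
		s->deleted = QLA_SESS_DELETION_PENDING;
		s->delay_secs = dev_loss_tmo;
	}
}

int main(void)
{
	struct sess_model s = { QLA_SESS_DELETION_NONE, 0 };

	schedule_for_deletion(&s, 0, 35);	/* delayed: PENDING, 35s */
	printf("state=%d delay=%us\n", s.deleted, s.delay_secs);
	schedule_for_deletion(&s, 1, 35);	/* upgrade: IN_PROGRESS, now */
	printf("state=%d delay=%us\n", s.deleted, s.delay_secs);
	return 0;
}
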
@@ -578,9 +663,9 @@ out_free_id_list:
578/* ha->hardware_lock supposed to be held on entry */ 663/* ha->hardware_lock supposed to be held on entry */
579static void qlt_undelete_sess(struct qla_tgt_sess *sess) 664static void qlt_undelete_sess(struct qla_tgt_sess *sess)
580{ 665{
581 BUG_ON(!sess->deleted); 666 BUG_ON(sess->deleted != QLA_SESS_DELETION_PENDING);
582 667
583 list_del(&sess->del_list_entry); 668 list_del_init(&sess->del_list_entry);
584 sess->deleted = 0; 669 sess->deleted = 0;
585} 670}
586 671
@@ -599,7 +684,9 @@ static void qlt_del_sess_work_fn(struct delayed_work *work)
599 del_list_entry); 684 del_list_entry);
600 elapsed = jiffies; 685 elapsed = jiffies;
601 if (time_after_eq(elapsed, sess->expires)) { 686 if (time_after_eq(elapsed, sess->expires)) {
602 qlt_undelete_sess(sess); 687 /* No turning back */
688 list_del_init(&sess->del_list_entry);
689 sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
603 690
604 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004, 691 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
605 "Timeout: sess %p about to be deleted\n", 692 "Timeout: sess %p about to be deleted\n",
@@ -643,6 +730,13 @@ static struct qla_tgt_sess *qlt_create_sess(
643 fcport->d_id.b.al_pa, fcport->d_id.b.area, 730 fcport->d_id.b.al_pa, fcport->d_id.b.area,
644 fcport->loop_id); 731 fcport->loop_id);
645 732
733 /* Cannot undelete at this point */
734 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
735 spin_unlock_irqrestore(&ha->hardware_lock,
736 flags);
737 return NULL;
738 }
739
646 if (sess->deleted) 740 if (sess->deleted)
647 qlt_undelete_sess(sess); 741 qlt_undelete_sess(sess);
648 742
@@ -652,6 +746,9 @@ static struct qla_tgt_sess *qlt_create_sess(
652 746
653 if (sess->local && !local) 747 if (sess->local && !local)
654 sess->local = 0; 748 sess->local = 0;
749
750 qlt_do_generation_tick(vha, &sess->generation);
751
655 spin_unlock_irqrestore(&ha->hardware_lock, flags); 752 spin_unlock_irqrestore(&ha->hardware_lock, flags);
656 753
657 return sess; 754 return sess;
@@ -673,6 +770,14 @@ static struct qla_tgt_sess *qlt_create_sess(
673 sess->s_id = fcport->d_id; 770 sess->s_id = fcport->d_id;
674 sess->loop_id = fcport->loop_id; 771 sess->loop_id = fcport->loop_id;
675 sess->local = local; 772 sess->local = local;
773 INIT_LIST_HEAD(&sess->del_list_entry);
774
 775	/* Under normal circumstances we want to log out from the firmware when
 776	 * the session eventually ends and release the corresponding nport handle.
 777	 * In exceptional cases (e.g. when a new PLOGI is waiting) the corresponding
 778	 * code will adjust these flags as necessary. */
779 sess->logout_on_delete = 1;
780 sess->keep_nport_handle = 0;
676 781
677 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006, 782 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
678 "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n", 783 "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n",
@@ -705,6 +810,7 @@ static struct qla_tgt_sess *qlt_create_sess(
705 spin_lock_irqsave(&ha->hardware_lock, flags); 810 spin_lock_irqsave(&ha->hardware_lock, flags);
706 list_add_tail(&sess->sess_list_entry, &vha->vha_tgt.qla_tgt->sess_list); 811 list_add_tail(&sess->sess_list_entry, &vha->vha_tgt.qla_tgt->sess_list);
707 vha->vha_tgt.qla_tgt->sess_count++; 812 vha->vha_tgt.qla_tgt->sess_count++;
813 qlt_do_generation_tick(vha, &sess->generation);
708 spin_unlock_irqrestore(&ha->hardware_lock, flags); 814 spin_unlock_irqrestore(&ha->hardware_lock, flags);
709 815
710 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b, 816 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
@@ -718,7 +824,7 @@ static struct qla_tgt_sess *qlt_create_sess(
718} 824}
719 825
720/* 826/*
721 * Called from drivers/scsi/qla2xxx/qla_init.c:qla2x00_reg_remote_port() 827 * Called from qla2x00_reg_remote_port()
722 */ 828 */
723void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport) 829void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
724{ 830{
@@ -750,6 +856,10 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
750 mutex_unlock(&vha->vha_tgt.tgt_mutex); 856 mutex_unlock(&vha->vha_tgt.tgt_mutex);
751 857
752 spin_lock_irqsave(&ha->hardware_lock, flags); 858 spin_lock_irqsave(&ha->hardware_lock, flags);
859 } else if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
860 /* Point of no return */
861 spin_unlock_irqrestore(&ha->hardware_lock, flags);
862 return;
753 } else { 863 } else {
754 kref_get(&sess->se_sess->sess_kref); 864 kref_get(&sess->se_sess->sess_kref);
755 865
@@ -780,27 +890,36 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
780 spin_unlock_irqrestore(&ha->hardware_lock, flags); 890 spin_unlock_irqrestore(&ha->hardware_lock, flags);
781} 891}
782 892
783void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport) 893/*
894 * max_gen - specifies maximum session generation
 895 * at which this deletion request is still valid
896 */
897void
898qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
784{ 899{
785 struct qla_hw_data *ha = vha->hw;
786 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 900 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
787 struct qla_tgt_sess *sess; 901 struct qla_tgt_sess *sess;
788 unsigned long flags;
789 902
790 if (!vha->hw->tgt.tgt_ops) 903 if (!vha->hw->tgt.tgt_ops)
791 return; 904 return;
792 905
793 if (!tgt || (fcport->port_type != FCT_INITIATOR)) 906 if (!tgt)
794 return; 907 return;
795 908
796 spin_lock_irqsave(&ha->hardware_lock, flags);
797 if (tgt->tgt_stop) { 909 if (tgt->tgt_stop) {
798 spin_unlock_irqrestore(&ha->hardware_lock, flags);
799 return; 910 return;
800 } 911 }
801 sess = qlt_find_sess_by_port_name(tgt, fcport->port_name); 912 sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
802 if (!sess) { 913 if (!sess) {
803 spin_unlock_irqrestore(&ha->hardware_lock, flags); 914 return;
915 }
916
917 if (max_gen - sess->generation < 0) {
918 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf092,
919 "Ignoring stale deletion request for se_sess %p / sess %p"
920 " for port %8phC, req_gen %d, sess_gen %d\n",
921 sess->se_sess, sess, sess->port_name, max_gen,
922 sess->generation);
804 return; 923 return;
805 } 924 }
806 925
@@ -808,7 +927,6 @@ void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport)
808 927
809 sess->local = 1; 928 sess->local = 1;
810 qlt_schedule_sess_for_deletion(sess, false); 929 qlt_schedule_sess_for_deletion(sess, false);
811 spin_unlock_irqrestore(&ha->hardware_lock, flags);
812} 930}
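
The new max_gen argument lets the initiator-side qla2xxx code tag a deletion request with the session generation it observed; qlt_fc_port_deleted() then drops the request if the session has been recreated (ticked to a newer generation) in the meantime. Below is a minimal userspace sketch of just that staleness test, assuming the generation counter is a plain int bumped by qlt_do_generation_tick(); it is an illustration, not driver code.

/*
 * Sketch of the generation-based staleness check in qlt_fc_port_deleted().
 * A deletion request carries the generation observed when it was issued
 * (max_gen); if the session's generation has since moved past max_gen,
 * the request is stale and must be ignored.
 */
#include <stdio.h>

static int deletion_request_is_stale(int max_gen, int sess_gen)
{
	/* same signed comparison as the hunk above: negative means stale */
	return (max_gen - sess_gen) < 0;
}

int main(void)
{
	printf("%d\n", deletion_request_is_stale(5, 5));	/* 0: still valid */
	printf("%d\n", deletion_request_is_stale(5, 7));	/* 1: stale, ignore */
	return 0;
}
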
813 931
814static inline int test_tgt_sess_count(struct qla_tgt *tgt) 932static inline int test_tgt_sess_count(struct qla_tgt *tgt)
@@ -1175,6 +1293,70 @@ static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
1175 FCP_TMF_CMPL, true); 1293 FCP_TMF_CMPL, true);
1176} 1294}
1177 1295
1296static int abort_cmd_for_tag(struct scsi_qla_host *vha, uint32_t tag)
1297{
1298 struct qla_tgt_sess_op *op;
1299 struct qla_tgt_cmd *cmd;
1300
1301 spin_lock(&vha->cmd_list_lock);
1302
1303 list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
1304 if (tag == op->atio.u.isp24.exchange_addr) {
1305 op->aborted = true;
1306 spin_unlock(&vha->cmd_list_lock);
1307 return 1;
1308 }
1309 }
1310
1311 list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
1312 if (tag == cmd->atio.u.isp24.exchange_addr) {
1313 cmd->state = QLA_TGT_STATE_ABORTED;
1314 spin_unlock(&vha->cmd_list_lock);
1315 return 1;
1316 }
1317 }
1318
1319 spin_unlock(&vha->cmd_list_lock);
1320 return 0;
1321}
1322
1323/* drop cmds for the given lun
1324 * XXX only looks for cmds on the port through which the lun reset was received
1325 * XXX does not go through the lists of other ports (which may have cmds
1326 * for the same lun)
1327 */
1328static void abort_cmds_for_lun(struct scsi_qla_host *vha,
1329 uint32_t lun, uint8_t *s_id)
1330{
1331 struct qla_tgt_sess_op *op;
1332 struct qla_tgt_cmd *cmd;
1333 uint32_t key;
1334
1335 key = sid_to_key(s_id);
1336 spin_lock(&vha->cmd_list_lock);
1337 list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
1338 uint32_t op_key;
1339 uint32_t op_lun;
1340
1341 op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
1342 op_lun = scsilun_to_int(
1343 (struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun);
1344 if (op_key == key && op_lun == lun)
1345 op->aborted = true;
1346 }
1347 list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
1348 uint32_t cmd_key;
1349 uint32_t cmd_lun;
1350
1351 cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
1352 cmd_lun = scsilun_to_int(
1353 (struct scsi_lun *)&cmd->atio.u.isp24.fcp_cmnd.lun);
1354 if (cmd_key == key && cmd_lun == lun)
1355 cmd->state = QLA_TGT_STATE_ABORTED;
1356 }
1357 spin_unlock(&vha->cmd_list_lock);
1358}
1359
1178/* ha->hardware_lock supposed to be held on entry */ 1360/* ha->hardware_lock supposed to be held on entry */
1179static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha, 1361static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
1180 struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess) 1362 struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess)
@@ -1199,8 +1381,19 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
1199 } 1381 }
1200 spin_unlock(&se_sess->sess_cmd_lock); 1382 spin_unlock(&se_sess->sess_cmd_lock);
1201 1383
1202 if (!found_lun) 1384 /* cmd not in LIO lists, look in qla list */
1203 return -ENOENT; 1385 if (!found_lun) {
1386 if (abort_cmd_for_tag(vha, abts->exchange_addr_to_abort)) {
1387 /* send TASK_ABORT response immediately */
1388 qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_CMPL, false);
1389 return 0;
1390 } else {
1391 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf081,
1392 "unable to find cmd in driver or LIO for tag 0x%x\n",
1393 abts->exchange_addr_to_abort);
1394 return -ENOENT;
1395 }
1396 }
1204 1397
1205 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f, 1398 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
1206 "qla_target(%d): task abort (tag=%d)\n", 1399 "qla_target(%d): task abort (tag=%d)\n",
@@ -1284,6 +1477,11 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
1284 return; 1477 return;
1285 } 1478 }
1286 1479
1480 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
1481 qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
1482 return;
1483 }
1484
1287 rc = __qlt_24xx_handle_abts(vha, abts, sess); 1485 rc = __qlt_24xx_handle_abts(vha, abts, sess);
1288 if (rc != 0) { 1486 if (rc != 0) {
1289 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054, 1487 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
@@ -1726,20 +1924,6 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
1726 struct qla_hw_data *ha = vha->hw; 1924 struct qla_hw_data *ha = vha->hw;
1727 struct se_cmd *se_cmd = &cmd->se_cmd; 1925 struct se_cmd *se_cmd = &cmd->se_cmd;
1728 1926
1729 if (unlikely(cmd->aborted)) {
1730 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
1731 "qla_target(%d): terminating exchange for aborted cmd=%p (se_cmd=%p, tag=%lld)",
1732 vha->vp_idx, cmd, se_cmd, se_cmd->tag);
1733
1734 cmd->state = QLA_TGT_STATE_ABORTED;
1735 cmd->cmd_flags |= BIT_6;
1736
1737 qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);
1738
1739 /* !! At this point cmd could be already freed !! */
1740 return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED;
1741 }
1742
1743 prm->cmd = cmd; 1927 prm->cmd = cmd;
1744 prm->tgt = tgt; 1928 prm->tgt = tgt;
1745 prm->rq_result = scsi_status; 1929 prm->rq_result = scsi_status;
@@ -2301,6 +2485,19 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
2301 unsigned long flags = 0; 2485 unsigned long flags = 0;
2302 int res; 2486 int res;
2303 2487
2488 spin_lock_irqsave(&ha->hardware_lock, flags);
2489 if (cmd->sess && cmd->sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
2490 cmd->state = QLA_TGT_STATE_PROCESSED;
2491 if (cmd->sess->logout_completed)
2492 /* no need to terminate. FW already freed exchange. */
2493 qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
2494 else
2495 qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
2496 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2497 return 0;
2498 }
2499 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2500
2304 memset(&prm, 0, sizeof(prm)); 2501 memset(&prm, 0, sizeof(prm));
2305 qlt_check_srr_debug(cmd, &xmit_type); 2502 qlt_check_srr_debug(cmd, &xmit_type);
2306 2503
@@ -2313,9 +2510,6 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
2313 res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status, 2510 res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
2314 &full_req_cnt); 2511 &full_req_cnt);
2315 if (unlikely(res != 0)) { 2512 if (unlikely(res != 0)) {
2316 if (res == QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED)
2317 return 0;
2318
2319 return res; 2513 return res;
2320 } 2514 }
2321 2515
@@ -2345,9 +2539,10 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
2345 res = qlt_build_ctio_crc2_pkt(&prm, vha); 2539 res = qlt_build_ctio_crc2_pkt(&prm, vha);
2346 else 2540 else
2347 res = qlt_24xx_build_ctio_pkt(&prm, vha); 2541 res = qlt_24xx_build_ctio_pkt(&prm, vha);
2348 if (unlikely(res != 0)) 2542 if (unlikely(res != 0)) {
2543 vha->req->cnt += full_req_cnt;
2349 goto out_unmap_unlock; 2544 goto out_unmap_unlock;
2350 2545 }
2351 2546
2352 pkt = (struct ctio7_to_24xx *)prm.pkt; 2547 pkt = (struct ctio7_to_24xx *)prm.pkt;
2353 2548
@@ -2461,7 +2656,8 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
2461 2656
2462 spin_lock_irqsave(&ha->hardware_lock, flags); 2657 spin_lock_irqsave(&ha->hardware_lock, flags);
2463 2658
2464 if (qla2x00_reset_active(vha) || cmd->reset_count != ha->chip_reset) { 2659 if (qla2x00_reset_active(vha) || (cmd->reset_count != ha->chip_reset) ||
2660 (cmd->sess && cmd->sess->deleted == QLA_SESS_DELETION_IN_PROGRESS)) {
2465 /* 2661 /*
2466 * Either a chip reset is active or this request was from 2662 * Either a chip reset is active or this request was from
2467 * previous life, just abort the processing. 2663 * previous life, just abort the processing.
@@ -2485,8 +2681,11 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
2485 else 2681 else
2486 res = qlt_24xx_build_ctio_pkt(&prm, vha); 2682 res = qlt_24xx_build_ctio_pkt(&prm, vha);
2487 2683
2488 if (unlikely(res != 0)) 2684 if (unlikely(res != 0)) {
2685 vha->req->cnt += prm.req_cnt;
2489 goto out_unlock_free_unmap; 2686 goto out_unlock_free_unmap;
2687 }
2688
2490 pkt = (struct ctio7_to_24xx *)prm.pkt; 2689 pkt = (struct ctio7_to_24xx *)prm.pkt;
2491 pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT | 2690 pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
2492 CTIO7_FLAGS_STATUS_MODE_0); 2691 CTIO7_FLAGS_STATUS_MODE_0);
@@ -2651,6 +2850,89 @@ out:
2651 2850
2652/* If hardware_lock held on entry, might drop it, then reaquire */ 2851/* If hardware_lock held on entry, might drop it, then reaquire */
2653/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */ 2852/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
2853static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
2854 struct imm_ntfy_from_isp *ntfy)
2855{
2856 struct nack_to_isp *nack;
2857 struct qla_hw_data *ha = vha->hw;
2858 request_t *pkt;
2859 int ret = 0;
2860
2861 ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c,
2862 "Sending TERM ELS CTIO (ha=%p)\n", ha);
2863
2864 pkt = (request_t *)qla2x00_alloc_iocbs_ready(vha, NULL);
2865 if (pkt == NULL) {
2866 ql_dbg(ql_dbg_tgt, vha, 0xe080,
2867 "qla_target(%d): %s failed: unable to allocate "
2868 "request packet\n", vha->vp_idx, __func__);
2869 return -ENOMEM;
2870 }
2871
2872 pkt->entry_type = NOTIFY_ACK_TYPE;
2873 pkt->entry_count = 1;
2874 pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
2875
2876 nack = (struct nack_to_isp *)pkt;
2877 nack->ox_id = ntfy->ox_id;
2878
2879 nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
2880 if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
2881 nack->u.isp24.flags = ntfy->u.isp24.flags &
2882 __constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
2883 }
2884
2885 /* terminate */
2886 nack->u.isp24.flags |=
2887 __constant_cpu_to_le16(NOTIFY_ACK_FLAGS_TERMINATE);
2888
2889 nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
2890 nack->u.isp24.status = ntfy->u.isp24.status;
2891 nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
2892 nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
2893 nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
2894 nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
2895 nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
2896 nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
2897
2898 qla2x00_start_iocbs(vha, vha->req);
2899 return ret;
2900}
2901
2902static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
2903 struct imm_ntfy_from_isp *imm, int ha_locked)
2904{
2905 unsigned long flags = 0;
2906 int rc;
2907
2908 if (qlt_issue_marker(vha, ha_locked) < 0)
2909 return;
2910
2911 if (ha_locked) {
2912 rc = __qlt_send_term_imm_notif(vha, imm);
2913
2914#if 0 /* Todo */
2915 if (rc == -ENOMEM)
2916 qlt_alloc_qfull_cmd(vha, imm, 0, 0);
2917#endif
2918 goto done;
2919 }
2920
2921 spin_lock_irqsave(&vha->hw->hardware_lock, flags);
2922 rc = __qlt_send_term_imm_notif(vha, imm);
2923
2924#if 0 /* Todo */
2925 if (rc == -ENOMEM)
2926 qlt_alloc_qfull_cmd(vha, imm, 0, 0);
2927#endif
2928
2929done:
2930 if (!ha_locked)
2931 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
2932}
2933
2934/* If hardware_lock held on entry, might drop it, then reacquire */
2935/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
2654static int __qlt_send_term_exchange(struct scsi_qla_host *vha, 2936static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
2655 struct qla_tgt_cmd *cmd, 2937 struct qla_tgt_cmd *cmd,
2656 struct atio_from_isp *atio) 2938 struct atio_from_isp *atio)
@@ -2715,7 +2997,7 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
2715static void qlt_send_term_exchange(struct scsi_qla_host *vha, 2997static void qlt_send_term_exchange(struct scsi_qla_host *vha,
2716 struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked) 2998 struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked)
2717{ 2999{
2718 unsigned long flags; 3000 unsigned long flags = 0;
2719 int rc; 3001 int rc;
2720 3002
2721 if (qlt_issue_marker(vha, ha_locked) < 0) 3003 if (qlt_issue_marker(vha, ha_locked) < 0)
@@ -2731,17 +3013,18 @@ static void qlt_send_term_exchange(struct scsi_qla_host *vha,
2731 rc = __qlt_send_term_exchange(vha, cmd, atio); 3013 rc = __qlt_send_term_exchange(vha, cmd, atio);
2732 if (rc == -ENOMEM) 3014 if (rc == -ENOMEM)
2733 qlt_alloc_qfull_cmd(vha, atio, 0, 0); 3015 qlt_alloc_qfull_cmd(vha, atio, 0, 0);
2734 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
2735 3016
2736done: 3017done:
2737 if (cmd && ((cmd->state != QLA_TGT_STATE_ABORTED) || 3018 if (cmd && ((cmd->state != QLA_TGT_STATE_ABORTED) ||
2738 !cmd->cmd_sent_to_fw)) { 3019 !cmd->cmd_sent_to_fw)) {
2739 if (!ha_locked && !in_interrupt()) 3020 if (cmd->sg_mapped)
2740 msleep(250); /* just in case */ 3021 qlt_unmap_sg(vha, cmd);
2741
2742 qlt_unmap_sg(vha, cmd);
2743 vha->hw->tgt.tgt_ops->free_cmd(cmd); 3022 vha->hw->tgt.tgt_ops->free_cmd(cmd);
2744 } 3023 }
3024
3025 if (!ha_locked)
3026 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
3027
2745 return; 3028 return;
2746} 3029}
2747 3030
@@ -2792,6 +3075,24 @@ static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha)
2792 3075
2793} 3076}
2794 3077
3078void qlt_abort_cmd(struct qla_tgt_cmd *cmd)
3079{
3080 struct qla_tgt *tgt = cmd->tgt;
3081 struct scsi_qla_host *vha = tgt->vha;
3082 struct se_cmd *se_cmd = &cmd->se_cmd;
3083
3084 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
3085 "qla_target(%d): terminating exchange for aborted cmd=%p "
3086 "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd,
3087 se_cmd->tag);
3088
3089 cmd->state = QLA_TGT_STATE_ABORTED;
3090 cmd->cmd_flags |= BIT_6;
3091
3092 qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);
3093}
3094EXPORT_SYMBOL(qlt_abort_cmd);
3095
2795void qlt_free_cmd(struct qla_tgt_cmd *cmd) 3096void qlt_free_cmd(struct qla_tgt_cmd *cmd)
2796{ 3097{
2797 struct qla_tgt_sess *sess = cmd->sess; 3098 struct qla_tgt_sess *sess = cmd->sess;
@@ -3015,7 +3316,7 @@ qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
3015 dump_stack(); 3316 dump_stack();
3016 } 3317 }
3017 3318
3018 cmd->cmd_flags |= BIT_12; 3319 cmd->cmd_flags |= BIT_17;
3019 ha->tgt.tgt_ops->free_cmd(cmd); 3320 ha->tgt.tgt_ops->free_cmd(cmd);
3020} 3321}
3021 3322
@@ -3177,7 +3478,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
3177skip_term: 3478skip_term:
3178 3479
3179 if (cmd->state == QLA_TGT_STATE_PROCESSED) { 3480 if (cmd->state == QLA_TGT_STATE_PROCESSED) {
3180 ; 3481 cmd->cmd_flags |= BIT_12;
3181 } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) { 3482 } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
3182 int rx_status = 0; 3483 int rx_status = 0;
3183 3484
@@ -3191,9 +3492,11 @@ skip_term:
3191 ha->tgt.tgt_ops->handle_data(cmd); 3492 ha->tgt.tgt_ops->handle_data(cmd);
3192 return; 3493 return;
3193 } else if (cmd->state == QLA_TGT_STATE_ABORTED) { 3494 } else if (cmd->state == QLA_TGT_STATE_ABORTED) {
3495 cmd->cmd_flags |= BIT_18;
3194 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e, 3496 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
3195 "Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag); 3497 "Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag);
3196 } else { 3498 } else {
3499 cmd->cmd_flags |= BIT_19;
3197 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c, 3500 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
3198 "qla_target(%d): A command in state (%d) should " 3501 "qla_target(%d): A command in state (%d) should "
3199 "not return a CTIO complete\n", vha->vp_idx, cmd->state); 3502 "not return a CTIO complete\n", vha->vp_idx, cmd->state);
@@ -3205,7 +3508,6 @@ skip_term:
3205 dump_stack(); 3508 dump_stack();
3206 } 3509 }
3207 3510
3208
3209 ha->tgt.tgt_ops->free_cmd(cmd); 3511 ha->tgt.tgt_ops->free_cmd(cmd);
3210} 3512}
3211 3513
@@ -3263,6 +3565,13 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
3263 if (tgt->tgt_stop) 3565 if (tgt->tgt_stop)
3264 goto out_term; 3566 goto out_term;
3265 3567
3568 if (cmd->state == QLA_TGT_STATE_ABORTED) {
3569 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf082,
3570 "cmd with tag %u is aborted\n",
3571 cmd->atio.u.isp24.exchange_addr);
3572 goto out_term;
3573 }
3574
3266 cdb = &atio->u.isp24.fcp_cmnd.cdb[0]; 3575 cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
3267 cmd->se_cmd.tag = atio->u.isp24.exchange_addr; 3576 cmd->se_cmd.tag = atio->u.isp24.exchange_addr;
3268 cmd->unpacked_lun = scsilun_to_int( 3577 cmd->unpacked_lun = scsilun_to_int(
@@ -3316,6 +3625,12 @@ out_term:
3316static void qlt_do_work(struct work_struct *work) 3625static void qlt_do_work(struct work_struct *work)
3317{ 3626{
3318 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); 3627 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
3628 scsi_qla_host_t *vha = cmd->vha;
3629 unsigned long flags;
3630
3631 spin_lock_irqsave(&vha->cmd_list_lock, flags);
3632 list_del(&cmd->cmd_list);
3633 spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
3319 3634
3320 __qlt_do_work(cmd); 3635 __qlt_do_work(cmd);
3321} 3636}
@@ -3345,6 +3660,11 @@ static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
3345 cmd->loop_id = sess->loop_id; 3660 cmd->loop_id = sess->loop_id;
3346 cmd->conf_compl_supported = sess->conf_compl_supported; 3661 cmd->conf_compl_supported = sess->conf_compl_supported;
3347 3662
3663 cmd->cmd_flags = 0;
3664 cmd->jiffies_at_alloc = get_jiffies_64();
3665
3666 cmd->reset_count = vha->hw->chip_reset;
3667
3348 return cmd; 3668 return cmd;
3349} 3669}
3350 3670
@@ -3362,14 +3682,25 @@ static void qlt_create_sess_from_atio(struct work_struct *work)
3362 unsigned long flags; 3682 unsigned long flags;
3363 uint8_t *s_id = op->atio.u.isp24.fcp_hdr.s_id; 3683 uint8_t *s_id = op->atio.u.isp24.fcp_hdr.s_id;
3364 3684
3685 spin_lock_irqsave(&vha->cmd_list_lock, flags);
3686 list_del(&op->cmd_list);
3687 spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
3688
3689 if (op->aborted) {
3690 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf083,
3691 "sess_op with tag %u is aborted\n",
3692 op->atio.u.isp24.exchange_addr);
3693 goto out_term;
3694 }
3695
3365 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022, 3696 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
3366 "qla_target(%d): Unable to find wwn login" 3697 "qla_target(%d): Unable to find wwn login"
3367 " (s_id %x:%x:%x), trying to create it manually\n", 3698 " (s_id %x:%x:%x), trying to create it manually\n",
3368 vha->vp_idx, s_id[0], s_id[1], s_id[2]); 3699 vha->vp_idx, s_id[0], s_id[1], s_id[2]);
3369 3700
3370 if (op->atio.u.raw.entry_count > 1) { 3701 if (op->atio.u.raw.entry_count > 1) {
3371 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023, 3702 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
3372 "Dropping multy entry atio %p\n", &op->atio); 3703 "Dropping multy entry atio %p\n", &op->atio);
3373 goto out_term; 3704 goto out_term;
3374 } 3705 }
3375 3706
@@ -3434,10 +3765,25 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
3434 3765
3435 memcpy(&op->atio, atio, sizeof(*atio)); 3766 memcpy(&op->atio, atio, sizeof(*atio));
3436 op->vha = vha; 3767 op->vha = vha;
3768
3769 spin_lock(&vha->cmd_list_lock);
3770 list_add_tail(&op->cmd_list, &vha->qla_sess_op_cmd_list);
3771 spin_unlock(&vha->cmd_list_lock);
3772
3437 INIT_WORK(&op->work, qlt_create_sess_from_atio); 3773 INIT_WORK(&op->work, qlt_create_sess_from_atio);
3438 queue_work(qla_tgt_wq, &op->work); 3774 queue_work(qla_tgt_wq, &op->work);
3439 return 0; 3775 return 0;
3440 } 3776 }
3777
3778 /* Another WWN used to have our s_id. Our PLOGI scheduled its
3779 * session deletion, but it's still in sess_del_work wq */
3780 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
3781 ql_dbg(ql_dbg_io, vha, 0x3061,
3782 "New command while old session %p is being deleted\n",
3783 sess);
3784 return -EFAULT;
3785 }
3786
3441 /* 3787 /*
3442 * Do kref_get() before returning + dropping qla_hw_data->hardware_lock. 3788 * Do kref_get() before returning + dropping qla_hw_data->hardware_lock.
3443 */ 3789 */
@@ -3451,13 +3797,13 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
3451 return -ENOMEM; 3797 return -ENOMEM;
3452 } 3798 }
3453 3799
3454 cmd->cmd_flags = 0;
3455 cmd->jiffies_at_alloc = get_jiffies_64();
3456
3457 cmd->reset_count = vha->hw->chip_reset;
3458
3459 cmd->cmd_in_wq = 1; 3800 cmd->cmd_in_wq = 1;
3460 cmd->cmd_flags |= BIT_0; 3801 cmd->cmd_flags |= BIT_0;
3802
3803 spin_lock(&vha->cmd_list_lock);
3804 list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list);
3805 spin_unlock(&vha->cmd_list_lock);
3806
3461 INIT_WORK(&cmd->work, qlt_do_work); 3807 INIT_WORK(&cmd->work, qlt_do_work);
3462 queue_work(qla_tgt_wq, &cmd->work); 3808 queue_work(qla_tgt_wq, &cmd->work);
3463 return 0; 3809 return 0;
@@ -3471,6 +3817,7 @@ static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
3471 struct scsi_qla_host *vha = sess->vha; 3817 struct scsi_qla_host *vha = sess->vha;
3472 struct qla_hw_data *ha = vha->hw; 3818 struct qla_hw_data *ha = vha->hw;
3473 struct qla_tgt_mgmt_cmd *mcmd; 3819 struct qla_tgt_mgmt_cmd *mcmd;
3820 struct atio_from_isp *a = (struct atio_from_isp *)iocb;
3474 int res; 3821 int res;
3475 uint8_t tmr_func; 3822 uint8_t tmr_func;
3476 3823
@@ -3511,6 +3858,7 @@ static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
3511 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10002, 3858 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10002,
3512 "qla_target(%d): LUN_RESET received\n", sess->vha->vp_idx); 3859 "qla_target(%d): LUN_RESET received\n", sess->vha->vp_idx);
3513 tmr_func = TMR_LUN_RESET; 3860 tmr_func = TMR_LUN_RESET;
3861 abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id);
3514 break; 3862 break;
3515 3863
3516 case QLA_TGT_CLEAR_TS: 3864 case QLA_TGT_CLEAR_TS:
@@ -3599,6 +3947,9 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
3599 sizeof(struct atio_from_isp)); 3947 sizeof(struct atio_from_isp));
3600 } 3948 }
3601 3949
3950 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS)
3951 return -EFAULT;
3952
3602 return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0); 3953 return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
3603} 3954}
3604 3955
@@ -3664,22 +4015,280 @@ static int qlt_abort_task(struct scsi_qla_host *vha,
3664 return __qlt_abort_task(vha, iocb, sess); 4015 return __qlt_abort_task(vha, iocb, sess);
3665} 4016}
3666 4017
4018void qlt_logo_completion_handler(fc_port_t *fcport, int rc)
4019{
4020 if (fcport->tgt_session) {
4021 if (rc != MBS_COMMAND_COMPLETE) {
4022 ql_dbg(ql_dbg_tgt_mgt, fcport->vha, 0xf093,
4023 "%s: se_sess %p / sess %p from"
4024 " port %8phC loop_id %#04x s_id %02x:%02x:%02x"
4025 " LOGO failed: %#x\n",
4026 __func__,
4027 fcport->tgt_session->se_sess,
4028 fcport->tgt_session,
4029 fcport->port_name, fcport->loop_id,
4030 fcport->d_id.b.domain, fcport->d_id.b.area,
4031 fcport->d_id.b.al_pa, rc);
4032 }
4033
4034 fcport->tgt_session->logout_completed = 1;
4035 }
4036}
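
qlt_logo_completion_handler() closes the loop opened in qlt_free_session_done(): the free path schedules an async LOGO, then polls sess->logout_completed (ACCESS_ONCE plus msleep(100)) until this handler sets it. The sketch below models that handshake in userspace, with a thread standing in for the firmware completion; it is an illustration only (build with -pthread), not driver code.

/*
 * Userspace model of the LOGO handshake between qlt_free_session_done()
 * and qlt_logo_completion_handler().  The "firmware" thread completes
 * the logout after a short delay; the teardown path polls the flag.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int logout_completed;

static void *fw_logo_thread(void *arg)
{
	(void)arg;
	usleep(300 * 1000);			/* firmware processes the LOGO */
	atomic_store(&logout_completed, 1);	/* qlt_logo_completion_handler() */
	return NULL;
}

int main(void)
{
	pthread_t fw;

	pthread_create(&fw, NULL, fw_logo_thread, NULL);

	/* qlt_free_session_done(): wait for the async LOGO to finish */
	while (!atomic_load(&logout_completed))
		usleep(100 * 1000);

	printf("logout completed, safe to free the session\n");
	pthread_join(fw, NULL);
	return 0;
}
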
4037
4038static void qlt_swap_imm_ntfy_iocb(struct imm_ntfy_from_isp *a,
4039 struct imm_ntfy_from_isp *b)
4040{
4041 struct imm_ntfy_from_isp tmp;
4042 memcpy(&tmp, a, sizeof(struct imm_ntfy_from_isp));
4043 memcpy(a, b, sizeof(struct imm_ntfy_from_isp));
4044 memcpy(b, &tmp, sizeof(struct imm_ntfy_from_isp));
4045}
4046
4047/*
4048 * ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list)
4049 *
4050 * Schedules sessions with matching port_id/loop_id but different wwn for
4051 * deletion. Returns existing session with matching wwn if present.
4052 * Null otherwise.
4053 */
4054static struct qla_tgt_sess *
4055qlt_find_sess_invalidate_other(struct qla_tgt *tgt, uint64_t wwn,
4056 port_id_t port_id, uint16_t loop_id)
4057{
4058 struct qla_tgt_sess *sess = NULL, *other_sess;
4059 uint64_t other_wwn;
4060
4061 list_for_each_entry(other_sess, &tgt->sess_list, sess_list_entry) {
4062
4063 other_wwn = wwn_to_u64(other_sess->port_name);
4064
4065 if (wwn == other_wwn) {
4066 WARN_ON(sess);
4067 sess = other_sess;
4068 continue;
4069 }
4070
4071 /* find other sess with nport_id collision */
4072 if (port_id.b24 == other_sess->s_id.b24) {
4073 if (loop_id != other_sess->loop_id) {
4074 ql_dbg(ql_dbg_tgt_tmr, tgt->vha, 0x1000c,
4075 "Invalidating sess %p loop_id %d wwn %llx.\n",
4076 other_sess, other_sess->loop_id, other_wwn);
4077
4078 /*
4079 * logout_on_delete is set by default, but another
4080 * session that has the same s_id/loop_id combo
 4080 * might have cleared it when it requested this
 4081 * session's deletion, so don't touch it
4083 */
4084 qlt_schedule_sess_for_deletion(other_sess, true);
4085 } else {
4086 /*
4087 * Another wwn used to have our s_id/loop_id
4088 * combo - kill the session, but don't log out
4089 */
4090 sess->logout_on_delete = 0;
4091 qlt_schedule_sess_for_deletion(other_sess,
4092 true);
4093 }
4094 continue;
4095 }
4096
4097 /* find other sess with nport handle collision */
4098 if (loop_id == other_sess->loop_id) {
4099 ql_dbg(ql_dbg_tgt_tmr, tgt->vha, 0x1000d,
4100 "Invalidating sess %p loop_id %d wwn %llx.\n",
4101 other_sess, other_sess->loop_id, other_wwn);
4102
4103 /* Same loop_id but different s_id
4104 * Ok to kill and logout */
4105 qlt_schedule_sess_for_deletion(other_sess, true);
4106 }
4107 }
4108
4109 return sess;
4110}
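
Collision handling in qlt_find_sess_invalidate_other() boils down to three cases per existing session: the same WWN means this is the session to return, a port_id or loop_id collision with a different WWN means the old session gets scheduled for deletion, and anything else is left alone. Below is a simplified userspace sketch of that classification; it deliberately ignores the logout_on_delete special case shown above and is an illustration, not driver code.

/*
 * Sketch of the collision rules in qlt_find_sess_invalidate_other():
 * compare an incoming login's wwn, port_id and loop_id against an
 * existing session and decide what happens to that session.
 */
#include <stdint.h>
#include <stdio.h>

enum action { KEEP_AS_MATCH, INVALIDATE, LEAVE_ALONE };

static enum action classify(uint64_t new_wwn, uint32_t new_sid, uint16_t new_loop,
			    uint64_t old_wwn, uint32_t old_sid, uint16_t old_loop)
{
	if (new_wwn == old_wwn)
		return KEEP_AS_MATCH;		/* same initiator: reuse it */
	if (new_sid == old_sid || new_loop == old_loop)
		return INVALIDATE;		/* nport_id or handle collision */
	return LEAVE_ALONE;
}

int main(void)
{
	/* different wwn, same s_id -> the old session gets invalidated */
	printf("%d\n", classify(0x1111, 0x010203, 0x81,
				0x2222, 0x010203, 0x82));	/* prints 1 */
	/* same wwn -> existing session is the match to return */
	printf("%d\n", classify(0x1111, 0x010203, 0x81,
				0x1111, 0x040506, 0x83));	/* prints 0 */
	return 0;
}
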
4111
4112/* Abort any commands for this s_id waiting on qla_tgt_wq workqueue */
4113static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id)
4114{
4115 struct qla_tgt_sess_op *op;
4116 struct qla_tgt_cmd *cmd;
4117 uint32_t key;
4118 int count = 0;
4119
4120 key = (((u32)s_id->b.domain << 16) |
4121 ((u32)s_id->b.area << 8) |
4122 ((u32)s_id->b.al_pa));
4123
4124 spin_lock(&vha->cmd_list_lock);
4125 list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
4126 uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
4127 if (op_key == key) {
4128 op->aborted = true;
4129 count++;
4130 }
4131 }
4132 list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
4133 uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
4134 if (cmd_key == key) {
4135 cmd->state = QLA_TGT_STATE_ABORTED;
4136 count++;
4137 }
4138 }
4139 spin_unlock(&vha->cmd_list_lock);
4140
4141 return count;
4142}
4143
3667/* 4144/*
3668 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire 4145 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
3669 */ 4146 */
3670static int qlt_24xx_handle_els(struct scsi_qla_host *vha, 4147static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
3671 struct imm_ntfy_from_isp *iocb) 4148 struct imm_ntfy_from_isp *iocb)
3672{ 4149{
4150 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4151 struct qla_hw_data *ha = vha->hw;
4152 struct qla_tgt_sess *sess = NULL;
4153 uint64_t wwn;
4154 port_id_t port_id;
4155 uint16_t loop_id;
4156 uint16_t wd3_lo;
3673 int res = 0; 4157 int res = 0;
3674 4158
4159 wwn = wwn_to_u64(iocb->u.isp24.port_name);
4160
4161 port_id.b.domain = iocb->u.isp24.port_id[2];
4162 port_id.b.area = iocb->u.isp24.port_id[1];
4163 port_id.b.al_pa = iocb->u.isp24.port_id[0];
4164 port_id.b.rsvd_1 = 0;
4165
4166 loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);
4167
3675 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026, 4168 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026,
3676 "qla_target(%d): Port ID: 0x%3phC ELS opcode: 0x%02x\n", 4169 "qla_target(%d): Port ID: 0x%3phC ELS opcode: 0x%02x\n",
3677 vha->vp_idx, iocb->u.isp24.port_id, iocb->u.isp24.status_subcode); 4170 vha->vp_idx, iocb->u.isp24.port_id, iocb->u.isp24.status_subcode);
3678 4171
 4172	/* res = 1 means ack at the end of this handler;
 4173	 * res = 0 means the ack is sent asynchronously later.
 4174	 */
3679 switch (iocb->u.isp24.status_subcode) { 4175 switch (iocb->u.isp24.status_subcode) {
3680 case ELS_PLOGI: 4176 case ELS_PLOGI:
3681 case ELS_FLOGI: 4177
4178 /* Mark all stale commands in qla_tgt_wq for deletion */
4179 abort_cmds_for_s_id(vha, &port_id);
4180
4181 if (wwn)
4182 sess = qlt_find_sess_invalidate_other(tgt, wwn,
4183 port_id, loop_id);
4184
4185 if (!sess || IS_SW_RESV_ADDR(sess->s_id)) {
4186 res = 1;
4187 break;
4188 }
4189
4190 if (sess->plogi_ack_needed) {
4191 /*
4192 * Initiator sent another PLOGI before last PLOGI could
4193 * finish. Swap plogi iocbs and terminate old one
4194 * without acking, new one will get acked when session
4195 * deletion completes.
4196 */
4197 ql_log(ql_log_warn, sess->vha, 0xf094,
4198 "sess %p received double plogi.\n", sess);
4199
4200 qlt_swap_imm_ntfy_iocb(iocb, &sess->tm_iocb);
4201
4202 qlt_send_term_imm_notif(vha, iocb, 1);
4203
4204 res = 0;
4205 break;
4206 }
4207
4208 res = 0;
4209
4210 /*
 4211 * Save the immediate notify IOCB so it can be acked once the
 4212 * session is done being deleted.
4213 */
4214 memcpy(&sess->tm_iocb, iocb, sizeof(sess->tm_iocb));
4215 sess->plogi_ack_needed = 1;
4216
4217 /*
4218 * Under normal circumstances we want to release nport handle
4219 * during LOGO process to avoid nport handle leaks inside FW.
4220 * The exception is when LOGO is done while another PLOGI with
4221 * the same nport handle is waiting as might be the case here.
 4222 * Note: there is always a possibility of a race where session
4223 * deletion has already started for other reasons (e.g. ACL
4224 * removal) and now PLOGI arrives:
4225 * 1. if PLOGI arrived in FW after nport handle has been freed,
4226 * FW must have assigned this PLOGI a new/same handle and we
4227 * can proceed ACK'ing it as usual when session deletion
4228 * completes.
4229 * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT
4230 * bit reached it, the handle has now been released. We'll
4231 * get an error when we ACK this PLOGI. Nothing will be sent
4232 * back to initiator. Initiator should eventually retry
4233 * PLOGI and situation will correct itself.
4234 */
4235 sess->keep_nport_handle = ((sess->loop_id == loop_id) &&
4236 (sess->s_id.b24 == port_id.b24));
4237 qlt_schedule_sess_for_deletion(sess, true);
4238 break;
4239
3682 case ELS_PRLI: 4240 case ELS_PRLI:
4241 wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);
4242
4243 if (wwn)
4244 sess = qlt_find_sess_invalidate_other(tgt, wwn, port_id,
4245 loop_id);
4246
4247 if (sess != NULL) {
4248 if (sess->deleted) {
4249 /*
4250 * Impatient initiator sent PRLI before last
4251 * PLOGI could finish. Will force him to re-try,
4252 * while last one finishes.
4253 */
4254 ql_log(ql_log_warn, sess->vha, 0xf095,
4255 "sess %p PRLI received, before plogi ack.\n",
4256 sess);
4257 qlt_send_term_imm_notif(vha, iocb, 1);
4258 res = 0;
4259 break;
4260 }
4261
4262 /*
4263 * This shouldn't happen under normal circumstances,
4264 * since we have deleted the old session during PLOGI
4265 */
4266 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf096,
4267 "PRLI (loop_id %#04x) for existing sess %p (loop_id %#04x)\n",
4268 sess->loop_id, sess, iocb->u.isp24.nport_handle);
4269
4270 sess->local = 0;
4271 sess->loop_id = loop_id;
4272 sess->s_id = port_id;
4273
4274 if (wd3_lo & BIT_7)
4275 sess->conf_compl_supported = 1;
4276
4277 }
4278 res = 1; /* send notify ack */
4279
4280 /* Make session global (not used in fabric mode) */
4281 if (ha->current_topology != ISP_CFG_F) {
4282 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4283 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
4284 qla2xxx_wake_dpc(vha);
4285 } else {
4286 /* todo: else - create sess here. */
4287 res = 1; /* send notify ack */
4288 }
4289
4290 break;
4291
3683 case ELS_LOGO: 4292 case ELS_LOGO:
3684 case ELS_PRLO: 4293 case ELS_PRLO:
3685 res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS); 4294 res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
@@ -3697,6 +4306,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
3697 break; 4306 break;
3698 } 4307 }
3699 4308
4309 case ELS_FLOGI: /* should never happen */
3700 default: 4310 default:
3701 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061, 4311 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
3702 "qla_target(%d): Unsupported ELS command %x " 4312 "qla_target(%d): Unsupported ELS command %x "
@@ -5012,6 +5622,11 @@ static void qlt_abort_work(struct qla_tgt *tgt,
5012 if (!sess) 5622 if (!sess)
5013 goto out_term; 5623 goto out_term;
5014 } else { 5624 } else {
5625 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
5626 sess = NULL;
5627 goto out_term;
5628 }
5629
5015 kref_get(&sess->se_sess->sess_kref); 5630 kref_get(&sess->se_sess->sess_kref);
5016 } 5631 }
5017 5632
@@ -5066,6 +5681,11 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
5066 if (!sess) 5681 if (!sess)
5067 goto out_term; 5682 goto out_term;
5068 } else { 5683 } else {
5684 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
5685 sess = NULL;
5686 goto out_term;
5687 }
5688
5069 kref_get(&sess->se_sess->sess_kref); 5689 kref_get(&sess->se_sess->sess_kref);
5070 } 5690 }
5071 5691
@@ -5552,6 +6172,7 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha)
5552 6172
5553 /* Adjust ring index */ 6173 /* Adjust ring index */
5554 WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index); 6174 WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
6175 RD_REG_DWORD_RELAXED(ISP_ATIO_Q_OUT(vha));
5555} 6176}
5556 6177
5557void 6178void
@@ -5793,7 +6414,7 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
5793 if (!QLA_TGT_MODE_ENABLED()) 6414 if (!QLA_TGT_MODE_ENABLED())
5794 return; 6415 return;
5795 6416
5796 if (ha->mqenable || IS_QLA83XX(ha)) { 6417 if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
5797 ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in; 6418 ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
5798 ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out; 6419 ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
5799 } else { 6420 } else {
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 985d76dd706b..bca584ae45b7 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -167,7 +167,24 @@ struct imm_ntfy_from_isp {
167 uint32_t srr_rel_offs; 167 uint32_t srr_rel_offs;
168 uint16_t srr_ui; 168 uint16_t srr_ui;
169 uint16_t srr_ox_id; 169 uint16_t srr_ox_id;
170 uint8_t reserved_4[19]; 170 union {
171 struct {
172 uint8_t node_name[8];
173 } plogi; /* PLOGI/ADISC/PDISC */
174 struct {
175 /* PRLI word 3 bit 0-15 */
176 uint16_t wd3_lo;
177 uint8_t resv0[6];
178 } prli;
179 struct {
180 uint8_t port_id[3];
181 uint8_t resv1;
182 uint16_t nport_handle;
183 uint16_t resv2;
184 } req_els;
185 } u;
186 uint8_t port_name[8];
187 uint8_t resv3[3];
171 uint8_t vp_index; 188 uint8_t vp_index;
172 uint32_t reserved_5; 189 uint32_t reserved_5;
173 uint8_t port_id[3]; 190 uint8_t port_id[3];
@@ -234,6 +251,7 @@ struct nack_to_isp {
234 uint8_t reserved[2]; 251 uint8_t reserved[2];
235 uint16_t ox_id; 252 uint16_t ox_id;
236} __packed; 253} __packed;
254#define NOTIFY_ACK_FLAGS_TERMINATE BIT_3
237#define NOTIFY_ACK_SRR_FLAGS_ACCEPT 0 255#define NOTIFY_ACK_SRR_FLAGS_ACCEPT 0
238#define NOTIFY_ACK_SRR_FLAGS_REJECT 1 256#define NOTIFY_ACK_SRR_FLAGS_REJECT 1
239 257
@@ -790,13 +808,6 @@ int qla2x00_wait_for_hba_online(struct scsi_qla_host *);
790#define FC_TM_REJECT 4 808#define FC_TM_REJECT 4
791#define FC_TM_FAILED 5 809#define FC_TM_FAILED 5
792 810
793/*
794 * Error code of qlt_pre_xmit_response() meaning that cmd's exchange was
795 * terminated, so no more actions is needed and success should be returned
796 * to target.
797 */
798#define QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED 0x1717
799
800#if (BITS_PER_LONG > 32) || defined(CONFIG_HIGHMEM64G) 811#if (BITS_PER_LONG > 32) || defined(CONFIG_HIGHMEM64G)
801#define pci_dma_lo32(a) (a & 0xffffffff) 812#define pci_dma_lo32(a) (a & 0xffffffff)
802#define pci_dma_hi32(a) ((((a) >> 16)>>16) & 0xffffffff) 813#define pci_dma_hi32(a) ((((a) >> 16)>>16) & 0xffffffff)
@@ -874,6 +885,15 @@ struct qla_tgt_sess_op {
874 struct scsi_qla_host *vha; 885 struct scsi_qla_host *vha;
875 struct atio_from_isp atio; 886 struct atio_from_isp atio;
876 struct work_struct work; 887 struct work_struct work;
888 struct list_head cmd_list;
889 bool aborted;
890};
891
892enum qla_sess_deletion {
893 QLA_SESS_DELETION_NONE = 0,
894 QLA_SESS_DELETION_PENDING = 1, /* hopefully we can get rid of
895 * this one */
896 QLA_SESS_DELETION_IN_PROGRESS = 2,
877}; 897};
878 898
879/* 899/*
@@ -884,8 +904,15 @@ struct qla_tgt_sess {
884 port_id_t s_id; 904 port_id_t s_id;
885 905
886 unsigned int conf_compl_supported:1; 906 unsigned int conf_compl_supported:1;
887 unsigned int deleted:1; 907 unsigned int deleted:2;
888 unsigned int local:1; 908 unsigned int local:1;
909 unsigned int logout_on_delete:1;
910 unsigned int plogi_ack_needed:1;
911 unsigned int keep_nport_handle:1;
912
913 unsigned char logout_completed;
914
915 int generation;
889 916
890 struct se_session *se_sess; 917 struct se_session *se_sess;
891 struct scsi_qla_host *vha; 918 struct scsi_qla_host *vha;
@@ -897,6 +924,10 @@ struct qla_tgt_sess {
897 924
898 uint8_t port_name[WWN_SIZE]; 925 uint8_t port_name[WWN_SIZE];
899 struct work_struct free_work; 926 struct work_struct free_work;
927
928 union {
929 struct imm_ntfy_from_isp tm_iocb;
930 };
900}; 931};
901 932
902struct qla_tgt_cmd { 933struct qla_tgt_cmd {
@@ -912,7 +943,6 @@ struct qla_tgt_cmd {
912 unsigned int conf_compl_supported:1; 943 unsigned int conf_compl_supported:1;
913 unsigned int sg_mapped:1; 944 unsigned int sg_mapped:1;
914 unsigned int free_sg:1; 945 unsigned int free_sg:1;
915 unsigned int aborted:1; /* Needed in case of SRR */
916 unsigned int write_data_transferred:1; 946 unsigned int write_data_transferred:1;
917 unsigned int ctx_dsd_alloced:1; 947 unsigned int ctx_dsd_alloced:1;
918 unsigned int q_full:1; 948 unsigned int q_full:1;
@@ -961,6 +991,9 @@ struct qla_tgt_cmd {
961 * BIT_14 - Back end data received/sent. 991 * BIT_14 - Back end data received/sent.
962 * BIT_15 - SRR prepare ctio 992 * BIT_15 - SRR prepare ctio
963 * BIT_16 - complete free 993 * BIT_16 - complete free
994 * BIT_17 - flush - qlt_abort_cmd_on_host_reset
995 * BIT_18 - completion w/abort status
996 * BIT_19 - completion w/unknown status
964 */ 997 */
965 uint32_t cmd_flags; 998 uint32_t cmd_flags;
966}; 999};
@@ -1026,6 +1059,10 @@ struct qla_tgt_srr_ctio {
1026 struct qla_tgt_cmd *cmd; 1059 struct qla_tgt_cmd *cmd;
1027}; 1060};
1028 1061
1062/* Check for Switch reserved address */
1063#define IS_SW_RESV_ADDR(_s_id) \
1064 ((_s_id.b.domain == 0xff) && (_s_id.b.area == 0xfc))
1065
1029#define QLA_TGT_XMIT_DATA 1 1066#define QLA_TGT_XMIT_DATA 1
1030#define QLA_TGT_XMIT_STATUS 2 1067#define QLA_TGT_XMIT_STATUS 2
1031#define QLA_TGT_XMIT_ALL (QLA_TGT_XMIT_STATUS|QLA_TGT_XMIT_DATA) 1068#define QLA_TGT_XMIT_ALL (QLA_TGT_XMIT_STATUS|QLA_TGT_XMIT_DATA)
@@ -1043,7 +1080,7 @@ extern int qlt_lport_register(void *, u64, u64, u64,
1043extern void qlt_lport_deregister(struct scsi_qla_host *); 1080extern void qlt_lport_deregister(struct scsi_qla_host *);
1044extern void qlt_unreg_sess(struct qla_tgt_sess *); 1081extern void qlt_unreg_sess(struct qla_tgt_sess *);
1045extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *); 1082extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *);
1046extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *); 1083extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *, int);
1047extern int __init qlt_init(void); 1084extern int __init qlt_init(void);
1048extern void qlt_exit(void); 1085extern void qlt_exit(void);
1049extern void qlt_update_vp_map(struct scsi_qla_host *, int); 1086extern void qlt_update_vp_map(struct scsi_qla_host *, int);
@@ -1073,12 +1110,23 @@ static inline void qla_reverse_ini_mode(struct scsi_qla_host *ha)
1073 ha->host->active_mode |= MODE_INITIATOR; 1110 ha->host->active_mode |= MODE_INITIATOR;
1074} 1111}
1075 1112
1113static inline uint32_t sid_to_key(const uint8_t *s_id)
1114{
1115 uint32_t key;
1116
1117 key = (((unsigned long)s_id[0] << 16) |
1118 ((unsigned long)s_id[1] << 8) |
1119 (unsigned long)s_id[2]);
1120 return key;
1121}
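
sid_to_key() packs the three S_ID bytes (domain, area, al_pa) into a single 24-bit key, which the patch now uses both for the b-tree lookups in tcm_qla2xxx.c and for the new command-abort helpers in qla_target.c. A minimal usage sketch, assuming the same byte order as the helper above; this is an illustration only, not driver code.

/* Minimal userspace sketch of the sid_to_key() packing; byte order
 * matches the helper above: s_id[0]=domain, s_id[1]=area, s_id[2]=al_pa. */
#include <stdint.h>
#include <stdio.h>

static uint32_t sid_to_key(const uint8_t *s_id)
{
	return ((uint32_t)s_id[0] << 16) |
	       ((uint32_t)s_id[1] << 8) |
	       (uint32_t)s_id[2];
}

int main(void)
{
	const uint8_t s_id[3] = { 0x01, 0x02, 0x03 };	/* domain:area:al_pa */

	printf("key = 0x%06x\n", sid_to_key(s_id));	/* key = 0x010203 */
	return 0;
}
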
1122
1076/* 1123/*
1077 * Exported symbols from qla_target.c LLD logic used by qla2xxx code.. 1124 * Exported symbols from qla_target.c LLD logic used by qla2xxx code..
1078 */ 1125 */
1079extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *); 1126extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *);
1080extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *); 1127extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *);
1081extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t); 1128extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t);
1129extern void qlt_abort_cmd(struct qla_tgt_cmd *);
1082extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *); 1130extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *);
1083extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *); 1131extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *);
1084extern void qlt_free_cmd(struct qla_tgt_cmd *cmd); 1132extern void qlt_free_cmd(struct qla_tgt_cmd *cmd);
@@ -1109,5 +1157,7 @@ extern void qlt_stop_phase2(struct qla_tgt *);
1109extern irqreturn_t qla83xx_msix_atio_q(int, void *); 1157extern irqreturn_t qla83xx_msix_atio_q(int, void *);
1110extern void qlt_83xx_iospace_config(struct qla_hw_data *); 1158extern void qlt_83xx_iospace_config(struct qla_hw_data *);
1111extern int qlt_free_qfull_cmds(struct scsi_qla_host *); 1159extern int qlt_free_qfull_cmds(struct scsi_qla_host *);
1160extern void qlt_logo_completion_handler(fc_port_t *, int);
1161extern void qlt_do_generation_tick(struct scsi_qla_host *, int *);
1112 1162
1113#endif /* __QLA_TARGET_H */ 1163#endif /* __QLA_TARGET_H */
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index d9a8c6084346..9224a06646e6 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -374,7 +374,7 @@ static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
374{ 374{
375 struct qla_tgt_cmd *cmd = container_of(se_cmd, 375 struct qla_tgt_cmd *cmd = container_of(se_cmd,
376 struct qla_tgt_cmd, se_cmd); 376 struct qla_tgt_cmd, se_cmd);
377 377 cmd->cmd_flags |= BIT_3;
378 cmd->bufflen = se_cmd->data_length; 378 cmd->bufflen = se_cmd->data_length;
379 cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); 379 cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
380 380
@@ -405,7 +405,7 @@ static int tcm_qla2xxx_write_pending_status(struct se_cmd *se_cmd)
405 se_cmd->t_state == TRANSPORT_COMPLETE_QF_WP) { 405 se_cmd->t_state == TRANSPORT_COMPLETE_QF_WP) {
406 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); 406 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
407 wait_for_completion_timeout(&se_cmd->t_transport_stop_comp, 407 wait_for_completion_timeout(&se_cmd->t_transport_stop_comp,
408 3000); 408 3 * HZ);
409 return 0; 409 return 0;
410 } 410 }
411 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); 411 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
@@ -541,12 +541,10 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
541 cmd->cmd_flags |= BIT_4; 541 cmd->cmd_flags |= BIT_4;
542 cmd->bufflen = se_cmd->data_length; 542 cmd->bufflen = se_cmd->data_length;
543 cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); 543 cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
544 cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
545 544
546 cmd->sg_cnt = se_cmd->t_data_nents; 545 cmd->sg_cnt = se_cmd->t_data_nents;
547 cmd->sg = se_cmd->t_data_sg; 546 cmd->sg = se_cmd->t_data_sg;
548 cmd->offset = 0; 547 cmd->offset = 0;
549 cmd->cmd_flags |= BIT_3;
550 548
551 cmd->prot_sg_cnt = se_cmd->t_prot_nents; 549 cmd->prot_sg_cnt = se_cmd->t_prot_nents;
552 cmd->prot_sg = se_cmd->t_prot_sg; 550 cmd->prot_sg = se_cmd->t_prot_sg;
@@ -571,7 +569,6 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
571 cmd->sg_cnt = 0; 569 cmd->sg_cnt = 0;
572 cmd->offset = 0; 570 cmd->offset = 0;
573 cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); 571 cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
574 cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
575 if (cmd->cmd_flags & BIT_5) { 572 if (cmd->cmd_flags & BIT_5) {
576 pr_crit("Bit_5 already set for cmd = %p.\n", cmd); 573 pr_crit("Bit_5 already set for cmd = %p.\n", cmd);
577 dump_stack(); 574 dump_stack();
@@ -636,14 +633,7 @@ static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd)
636{ 633{
637 struct qla_tgt_cmd *cmd = container_of(se_cmd, 634 struct qla_tgt_cmd *cmd = container_of(se_cmd,
638 struct qla_tgt_cmd, se_cmd); 635 struct qla_tgt_cmd, se_cmd);
639 struct scsi_qla_host *vha = cmd->vha; 636 qlt_abort_cmd(cmd);
640 struct qla_hw_data *ha = vha->hw;
641
642 if (!cmd->sg_mapped)
643 return;
644
645 pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
646 cmd->sg_mapped = 0;
647} 637}
648 638
649static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *, 639static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *,
@@ -1149,9 +1139,7 @@ static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_s_id(
1149 return NULL; 1139 return NULL;
1150 } 1140 }
1151 1141
1152 key = (((unsigned long)s_id[0] << 16) | 1142 key = sid_to_key(s_id);
1153 ((unsigned long)s_id[1] << 8) |
1154 (unsigned long)s_id[2]);
1155 pr_debug("find_sess_by_s_id: 0x%06x\n", key); 1143 pr_debug("find_sess_by_s_id: 0x%06x\n", key);
1156 1144
1157 se_nacl = btree_lookup32(&lport->lport_fcport_map, key); 1145 se_nacl = btree_lookup32(&lport->lport_fcport_map, key);
@@ -1186,9 +1174,7 @@ static void tcm_qla2xxx_set_sess_by_s_id(
1186 void *slot; 1174 void *slot;
1187 int rc; 1175 int rc;
1188 1176
1189 key = (((unsigned long)s_id[0] << 16) | 1177 key = sid_to_key(s_id);
1190 ((unsigned long)s_id[1] << 8) |
1191 (unsigned long)s_id[2]);
1192 pr_debug("set_sess_by_s_id: %06x\n", key); 1178 pr_debug("set_sess_by_s_id: %06x\n", key);
1193 1179
1194 slot = btree_lookup32(&lport->lport_fcport_map, key); 1180 slot = btree_lookup32(&lport->lport_fcport_map, key);
@@ -1544,6 +1530,10 @@ static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id,
1544 } 1530 }
1545 1531
1546 sess->conf_compl_supported = conf_compl_supported; 1532 sess->conf_compl_supported = conf_compl_supported;
1533
1534 /* Reset logout parameters to default */
1535 sess->logout_on_delete = 1;
1536 sess->keep_nport_handle = 0;
1547} 1537}
1548 1538
1549/* 1539/*
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 106884a5444e..6457a8a0db9c 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -26,7 +26,6 @@
26#include <linux/blkdev.h> 26#include <linux/blkdev.h>
27#include <linux/delay.h> 27#include <linux/delay.h>
28#include <linux/jiffies.h> 28#include <linux/jiffies.h>
29#include <asm/unaligned.h>
30 29
31#include <scsi/scsi.h> 30#include <scsi/scsi.h>
32#include <scsi/scsi_cmnd.h> 31#include <scsi/scsi_cmnd.h>
@@ -944,7 +943,7 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
944 scmd->sdb.length); 943 scmd->sdb.length);
945 scmd->sdb.table.sgl = &ses->sense_sgl; 944 scmd->sdb.table.sgl = &ses->sense_sgl;
946 scmd->sc_data_direction = DMA_FROM_DEVICE; 945 scmd->sc_data_direction = DMA_FROM_DEVICE;
947 scmd->sdb.table.nents = 1; 946 scmd->sdb.table.nents = scmd->sdb.table.orig_nents = 1;
948 scmd->cmnd[0] = REQUEST_SENSE; 947 scmd->cmnd[0] = REQUEST_SENSE;
949 scmd->cmnd[4] = scmd->sdb.length; 948 scmd->cmnd[4] = scmd->sdb.length;
950 scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]); 949 scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
@@ -2523,33 +2522,3 @@ void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq)
2523 } 2522 }
2524} 2523}
2525EXPORT_SYMBOL(scsi_build_sense_buffer); 2524EXPORT_SYMBOL(scsi_build_sense_buffer);
2526
2527/**
2528 * scsi_set_sense_information - set the information field in a
2529 * formatted sense data buffer
2530 * @buf: Where to build sense data
2531 * @info: 64-bit information value to be set
2532 *
2533 **/
2534void scsi_set_sense_information(u8 *buf, u64 info)
2535{
2536 if ((buf[0] & 0x7f) == 0x72) {
2537 u8 *ucp, len;
2538
2539 len = buf[7];
2540 ucp = (char *)scsi_sense_desc_find(buf, len + 8, 0);
2541 if (!ucp) {
2542 buf[7] = len + 0xa;
2543 ucp = buf + 8 + len;
2544 }
2545 ucp[0] = 0;
2546 ucp[1] = 0xa;
2547 ucp[2] = 0x80; /* Valid bit */
2548 ucp[3] = 0;
2549 put_unaligned_be64(info, &ucp[4]);
2550 } else if ((buf[0] & 0x7f) == 0x70) {
2551 buf[0] |= 0x80;
2552 put_unaligned_be64(info, &buf[3]);
2553 }
2554}
2555EXPORT_SYMBOL(scsi_set_sense_information);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index b1a263137a23..448ebdaa3d69 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -583,7 +583,7 @@ static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask)
583 583
584static void scsi_free_sgtable(struct scsi_data_buffer *sdb, bool mq) 584static void scsi_free_sgtable(struct scsi_data_buffer *sdb, bool mq)
585{ 585{
586 if (mq && sdb->table.nents <= SCSI_MAX_SG_SEGMENTS) 586 if (mq && sdb->table.orig_nents <= SCSI_MAX_SG_SEGMENTS)
587 return; 587 return;
588 __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, mq, scsi_sg_free); 588 __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, mq, scsi_sg_free);
589} 589}
@@ -597,8 +597,8 @@ static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents, bool mq)
597 597
598 if (mq) { 598 if (mq) {
599 if (nents <= SCSI_MAX_SG_SEGMENTS) { 599 if (nents <= SCSI_MAX_SG_SEGMENTS) {
600 sdb->table.nents = nents; 600 sdb->table.nents = sdb->table.orig_nents = nents;
601 sg_init_table(sdb->table.sgl, sdb->table.nents); 601 sg_init_table(sdb->table.sgl, nents);
602 return 0; 602 return 0;
603 } 603 }
604 first_chunk = sdb->table.sgl; 604 first_chunk = sdb->table.sgl;
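The scsi_error.c and scsi_lib.c hunks keep table.orig_nents in step with table.nents because scsi_free_sgtable() now decides from orig_nents whether a chained table was allocated, while nents may later be trimmed (scsi_eh_prep_cmnd shrinks it to 1 for the sense buffer). A simplified stand-alone illustration of why the two fields must be set together; the struct below is a stand-in, not the real struct sg_table:

#include <stdio.h>

/* Simplified stand-in for struct sg_table: nents may be trimmed after
 * setup, orig_nents must keep the allocation size for the free path. */
struct sg_table_sketch {
	unsigned int nents;       /* entries currently in use */
	unsigned int orig_nents;  /* entries originally allocated */
};

#define MAX_INLINE_SEGMENTS 128

static void init_table(struct sg_table_sketch *t, unsigned int nents)
{
	/* Mirror of the fix: set both fields together. */
	t->nents = t->orig_nents = nents;
}

static int table_was_chained(const struct sg_table_sketch *t)
{
	/* The free path must consult orig_nents, not nents, because nents
	 * may have been reduced (e.g. to 1 for a sense buffer) afterwards. */
	return t->orig_nents > MAX_INLINE_SEGMENTS;
}

int main(void)
{
	struct sg_table_sketch t;

	init_table(&t, 200);
	t.nents = 1;		/* what scsi_eh_prep_cmnd effectively does */
	printf("chained: %d\n", table_was_chained(&t));	/* still 1 */
	return 0;
}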
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index 9e43ae1d2163..e4b799837948 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -217,15 +217,15 @@ static int sdev_runtime_suspend(struct device *dev)
217{ 217{
218 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 218 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
219 struct scsi_device *sdev = to_scsi_device(dev); 219 struct scsi_device *sdev = to_scsi_device(dev);
220 int err; 220 int err = 0;
221 221
222 err = blk_pre_runtime_suspend(sdev->request_queue); 222 if (pm && pm->runtime_suspend) {
223 if (err) 223 err = blk_pre_runtime_suspend(sdev->request_queue);
224 return err; 224 if (err)
225 if (pm && pm->runtime_suspend) 225 return err;
226 err = pm->runtime_suspend(dev); 226 err = pm->runtime_suspend(dev);
227 blk_post_runtime_suspend(sdev->request_queue, err); 227 blk_post_runtime_suspend(sdev->request_queue, err);
228 228 }
229 return err; 229 return err;
230} 230}
231 231
@@ -248,11 +248,11 @@ static int sdev_runtime_resume(struct device *dev)
248 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 248 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
249 int err = 0; 249 int err = 0;
250 250
251 blk_pre_runtime_resume(sdev->request_queue); 251 if (pm && pm->runtime_resume) {
252 if (pm && pm->runtime_resume) 252 blk_pre_runtime_resume(sdev->request_queue);
253 err = pm->runtime_resume(dev); 253 err = pm->runtime_resume(dev);
254 blk_post_runtime_resume(sdev->request_queue, err); 254 blk_post_runtime_resume(sdev->request_queue, err);
255 255 }
256 return err; 256 return err;
257} 257}
258 258
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 1ac38e73df7e..9ad41168d26d 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -859,7 +859,7 @@ sdev_store_queue_depth(struct device *dev, struct device_attribute *attr,
859 859
860 depth = simple_strtoul(buf, NULL, 0); 860 depth = simple_strtoul(buf, NULL, 0);
861 861
862 if (depth < 1 || depth > sht->can_queue) 862 if (depth < 1 || depth > sdev->host->can_queue)
863 return -EINVAL; 863 return -EINVAL;
864 864
865 retval = sht->change_queue_depth(sdev, depth); 865 retval = sht->change_queue_depth(sdev, depth);
diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
index a85292b1d09d..e3cd3ece4412 100644
--- a/drivers/scsi/scsi_transport_srp.c
+++ b/drivers/scsi/scsi_transport_srp.c
@@ -203,7 +203,7 @@ static ssize_t srp_show_tmo(char *buf, int tmo)
203 return tmo >= 0 ? sprintf(buf, "%d\n", tmo) : sprintf(buf, "off\n"); 203 return tmo >= 0 ? sprintf(buf, "%d\n", tmo) : sprintf(buf, "off\n");
204} 204}
205 205
206static int srp_parse_tmo(int *tmo, const char *buf) 206int srp_parse_tmo(int *tmo, const char *buf)
207{ 207{
208 int res = 0; 208 int res = 0;
209 209
@@ -214,6 +214,7 @@ static int srp_parse_tmo(int *tmo, const char *buf)
214 214
215 return res; 215 return res;
216} 216}
217EXPORT_SYMBOL(srp_parse_tmo);
217 218
218static ssize_t show_reconnect_delay(struct device *dev, 219static ssize_t show_reconnect_delay(struct device *dev,
219 struct device_attribute *attr, char *buf) 220 struct device_attribute *attr, char *buf)
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 3b2fcb4fada0..a20da8c25b4f 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2770,9 +2770,9 @@ static int sd_revalidate_disk(struct gendisk *disk)
2770 max_xfer = sdkp->max_xfer_blocks; 2770 max_xfer = sdkp->max_xfer_blocks;
2771 max_xfer <<= ilog2(sdp->sector_size) - 9; 2771 max_xfer <<= ilog2(sdp->sector_size) - 9;
2772 2772
2773 max_xfer = min_not_zero(queue_max_hw_sectors(sdkp->disk->queue), 2773 sdkp->disk->queue->limits.max_sectors =
2774 max_xfer); 2774 min_not_zero(queue_max_hw_sectors(sdkp->disk->queue), max_xfer);
2775 blk_queue_max_hw_sectors(sdkp->disk->queue, max_xfer); 2775
2776 set_capacity(disk, sdkp->capacity); 2776 set_capacity(disk, sdkp->capacity);
2777 sd_config_write_same(sdkp); 2777 sd_config_write_same(sdkp);
2778 kfree(buffer); 2778 kfree(buffer);
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 3f25b8fa921d..871f3553987d 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -1329,9 +1329,9 @@ static int st_open(struct inode *inode, struct file *filp)
1329 spin_lock(&st_use_lock); 1329 spin_lock(&st_use_lock);
1330 STp->in_use = 0; 1330 STp->in_use = 0;
1331 spin_unlock(&st_use_lock); 1331 spin_unlock(&st_use_lock);
1332 scsi_tape_put(STp);
1333 if (resumed) 1332 if (resumed)
1334 scsi_autopm_put_device(STp->device); 1333 scsi_autopm_put_device(STp->device);
1334 scsi_tape_put(STp);
1335 return retval; 1335 return retval;
1336 1336
1337} 1337}
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 285f77544c36..7dbbb29d24c6 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -949,7 +949,7 @@ static int virtscsi_probe(struct virtio_device *vdev)
949{ 949{
950 struct Scsi_Host *shost; 950 struct Scsi_Host *shost;
951 struct virtio_scsi *vscsi; 951 struct virtio_scsi *vscsi;
952 int err, host_prot; 952 int err;
953 u32 sg_elems, num_targets; 953 u32 sg_elems, num_targets;
954 u32 cmd_per_lun; 954 u32 cmd_per_lun;
955 u32 num_queues; 955 u32 num_queues;
@@ -1009,6 +1009,8 @@ static int virtscsi_probe(struct virtio_device *vdev)
1009 1009
1010#ifdef CONFIG_BLK_DEV_INTEGRITY 1010#ifdef CONFIG_BLK_DEV_INTEGRITY
1011 if (virtio_has_feature(vdev, VIRTIO_SCSI_F_T10_PI)) { 1011 if (virtio_has_feature(vdev, VIRTIO_SCSI_F_T10_PI)) {
1012 int host_prot;
1013
1012 host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION | 1014 host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
1013 SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION | 1015 SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
1014 SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION; 1016 SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 0cae1694014d..b0f30fb68914 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -612,7 +612,7 @@ config SPI_XTENSA_XTFPGA
612 612
613config SPI_ZYNQMP_GQSPI 613config SPI_ZYNQMP_GQSPI
614 tristate "Xilinx ZynqMP GQSPI controller" 614 tristate "Xilinx ZynqMP GQSPI controller"
615 depends on SPI_MASTER 615 depends on SPI_MASTER && HAS_DMA
616 help 616 help
617 Enables Xilinx GQSPI controller driver for Zynq UltraScale+ MPSoC. 617 Enables Xilinx GQSPI controller driver for Zynq UltraScale+ MPSoC.
618 618
diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c
index 788e2b176a4f..acce90ac7371 100644
--- a/drivers/spi/spi-img-spfi.c
+++ b/drivers/spi/spi-img-spfi.c
@@ -40,6 +40,7 @@
40#define SPFI_CONTROL_SOFT_RESET BIT(11) 40#define SPFI_CONTROL_SOFT_RESET BIT(11)
41#define SPFI_CONTROL_SEND_DMA BIT(10) 41#define SPFI_CONTROL_SEND_DMA BIT(10)
42#define SPFI_CONTROL_GET_DMA BIT(9) 42#define SPFI_CONTROL_GET_DMA BIT(9)
43#define SPFI_CONTROL_SE BIT(8)
43#define SPFI_CONTROL_TMODE_SHIFT 5 44#define SPFI_CONTROL_TMODE_SHIFT 5
44#define SPFI_CONTROL_TMODE_MASK 0x7 45#define SPFI_CONTROL_TMODE_MASK 0x7
45#define SPFI_CONTROL_TMODE_SINGLE 0 46#define SPFI_CONTROL_TMODE_SINGLE 0
@@ -491,6 +492,7 @@ static void img_spfi_config(struct spi_master *master, struct spi_device *spi,
491 else if (xfer->tx_nbits == SPI_NBITS_QUAD && 492 else if (xfer->tx_nbits == SPI_NBITS_QUAD &&
492 xfer->rx_nbits == SPI_NBITS_QUAD) 493 xfer->rx_nbits == SPI_NBITS_QUAD)
493 val |= SPFI_CONTROL_TMODE_QUAD << SPFI_CONTROL_TMODE_SHIFT; 494 val |= SPFI_CONTROL_TMODE_QUAD << SPFI_CONTROL_TMODE_SHIFT;
495 val |= SPFI_CONTROL_SE;
494 spfi_writel(spfi, val, SPFI_CONTROL); 496 spfi_writel(spfi, val, SPFI_CONTROL);
495} 497}
496 498
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index eb7d3a6fb14c..f9deb84e4e55 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -201,8 +201,9 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
201{ 201{
202 struct spi_imx_data *spi_imx = spi_master_get_devdata(master); 202 struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
203 203
204 if (spi_imx->dma_is_inited && (transfer->len > spi_imx->rx_wml) 204 if (spi_imx->dma_is_inited
205 && (transfer->len > spi_imx->tx_wml)) 205 && transfer->len > spi_imx->rx_wml * sizeof(u32)
206 && transfer->len > spi_imx->tx_wml * sizeof(u32))
206 return true; 207 return true;
207 return false; 208 return false;
208} 209}
diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c
index 87b20a511a6b..f23f36ebaf3d 100644
--- a/drivers/spi/spi-zynqmp-gqspi.c
+++ b/drivers/spi/spi-zynqmp-gqspi.c
@@ -214,6 +214,7 @@ static void zynqmp_gqspi_selectslave(struct zynqmp_qspi *instanceptr,
214 case GQSPI_SELECT_FLASH_CS_BOTH: 214 case GQSPI_SELECT_FLASH_CS_BOTH:
215 instanceptr->genfifocs = GQSPI_GENFIFO_CS_LOWER | 215 instanceptr->genfifocs = GQSPI_GENFIFO_CS_LOWER |
216 GQSPI_GENFIFO_CS_UPPER; 216 GQSPI_GENFIFO_CS_UPPER;
217 break;
217 case GQSPI_SELECT_FLASH_CS_UPPER: 218 case GQSPI_SELECT_FLASH_CS_UPPER:
218 instanceptr->genfifocs = GQSPI_GENFIFO_CS_UPPER; 219 instanceptr->genfifocs = GQSPI_GENFIFO_CS_UPPER;
219 break; 220 break;
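The spi-zynqmp-gqspi and das1800 hunks both add a missing break so one switch case no longer falls through into the next and silently overwrites its result. A minimal, driver-independent demonstration of that failure mode:

#include <stdio.h>

enum cs { CS_LOWER, CS_UPPER, CS_BOTH };

static unsigned int select_cs(enum cs which)
{
	unsigned int mask = 0;

	switch (which) {
	case CS_BOTH:
		mask = 0x3;
		break;		/* without this break, CS_BOTH falls through
				 * and the mask is overwritten with 0x2 */
	case CS_UPPER:
		mask = 0x2;
		break;
	case CS_LOWER:
		mask = 0x1;
		break;
	}
	return mask;
}

int main(void)
{
	printf("CS_BOTH mask = 0x%x\n", select_cs(CS_BOTH));	/* 0x3 */
	return 0;
}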
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index dd616ff0ffc5..c7de64171c45 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -693,6 +693,7 @@ static struct class *spidev_class;
693#ifdef CONFIG_OF 693#ifdef CONFIG_OF
694static const struct of_device_id spidev_dt_ids[] = { 694static const struct of_device_id spidev_dt_ids[] = {
695 { .compatible = "rohm,dh2228fv" }, 695 { .compatible = "rohm,dh2228fv" },
696 { .compatible = "lineartechnology,ltc2488" },
696 {}, 697 {},
697}; 698};
698MODULE_DEVICE_TABLE(of, spidev_dt_ids); 699MODULE_DEVICE_TABLE(of, spidev_dt_ids);
diff --git a/drivers/staging/board/Kconfig b/drivers/staging/board/Kconfig
index b8ee81840666..3f287c48e082 100644
--- a/drivers/staging/board/Kconfig
+++ b/drivers/staging/board/Kconfig
@@ -1,6 +1,6 @@
1config STAGING_BOARD 1config STAGING_BOARD
2 bool "Staging Board Support" 2 bool "Staging Board Support"
3 depends on OF_ADDRESS 3 depends on OF_ADDRESS && OF_IRQ && CLKDEV_LOOKUP
4 help 4 help
5 Select to enable per-board staging support code. 5 Select to enable per-board staging support code.
6 6
diff --git a/drivers/staging/comedi/drivers/das1800.c b/drivers/staging/comedi/drivers/das1800.c
index bfa42620a3f6..940781183fac 100644
--- a/drivers/staging/comedi/drivers/das1800.c
+++ b/drivers/staging/comedi/drivers/das1800.c
@@ -1266,6 +1266,7 @@ static const struct das1800_board *das1800_probe(struct comedi_device *dev)
1266 if (index == das1801hc || index == das1802hc) 1266 if (index == das1801hc || index == das1802hc)
1267 return board; 1267 return board;
1268 index = das1801hc; 1268 index = das1801hc;
1269 break;
1269 default: 1270 default:
1270 dev_err(dev->class_dev, 1271 dev_err(dev->class_dev,
1271 "Board model: probe returned 0x%x (unknown, please report)\n", 1272 "Board model: probe returned 0x%x (unknown, please report)\n",
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
index 7125eb955ae5..8a9d4a0de129 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
@@ -31,7 +31,6 @@
31#define DEBUG_PORTAL_ALLOC 31#define DEBUG_PORTAL_ALLOC
32#define DEBUG_SUBSYSTEM S_LND 32#define DEBUG_SUBSYSTEM S_LND
33 33
34#include <asm/irq.h>
35#include <linux/crc32.h> 34#include <linux/crc32.h>
36#include <linux/errno.h> 35#include <linux/errno.h>
37#include <linux/if.h> 36#include <linux/if.h>
diff --git a/drivers/staging/lustre/lustre/obdclass/debug.c b/drivers/staging/lustre/lustre/obdclass/debug.c
index 9c934e6d2ea1..c61add46b426 100644
--- a/drivers/staging/lustre/lustre/obdclass/debug.c
+++ b/drivers/staging/lustre/lustre/obdclass/debug.c
@@ -40,7 +40,7 @@
40 40
41#define DEBUG_SUBSYSTEM D_OTHER 41#define DEBUG_SUBSYSTEM D_OTHER
42 42
43#include <linux/unaligned/access_ok.h> 43#include <asm/unaligned.h>
44 44
45#include "../include/obd_support.h" 45#include "../include/obd_support.h"
46#include "../include/lustre_debug.h" 46#include "../include/lustre_debug.h"
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index ed040fbb7df8..69bdc8f29b59 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -1418,7 +1418,7 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
1418 1418
1419 priv->current_aid = conf->aid; 1419 priv->current_aid = conf->aid;
1420 1420
1421 if (changed & BSS_CHANGED_BSSID) { 1421 if (changed & BSS_CHANGED_BSSID && conf->bssid) {
1422 unsigned long flags; 1422 unsigned long flags;
1423 1423
1424 spin_lock_irqsave(&priv->lock, flags); 1424 spin_lock_irqsave(&priv->lock, flags);
@@ -1483,8 +1483,9 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
1483 } 1483 }
1484 } 1484 }
1485 1485
1486 if (changed & BSS_CHANGED_ASSOC && priv->op_mode != NL80211_IFTYPE_AP) { 1486 if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_BEACON_INFO) &&
1487 if (conf->assoc) { 1487 priv->op_mode != NL80211_IFTYPE_AP) {
1488 if (conf->assoc && conf->beacon_rate) {
1488 CARDbUpdateTSF(priv, conf->beacon_rate->hw_value, 1489 CARDbUpdateTSF(priv, conf->beacon_rate->hw_value,
1489 conf->sync_tsf); 1490 conf->sync_tsf);
1490 1491
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index f97323f19acf..af572d718135 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -701,7 +701,7 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
701 701
702 priv->current_aid = conf->aid; 702 priv->current_aid = conf->aid;
703 703
704 if (changed & BSS_CHANGED_BSSID) 704 if (changed & BSS_CHANGED_BSSID && conf->bssid)
705 vnt_mac_set_bssid_addr(priv, (u8 *)conf->bssid); 705 vnt_mac_set_bssid_addr(priv, (u8 *)conf->bssid);
706 706
707 707
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 4e68b62193ed..fd092909a457 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -968,9 +968,9 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
968 cmd->cmd_flags |= ICF_NON_IMMEDIATE_UNSOLICITED_DATA; 968 cmd->cmd_flags |= ICF_NON_IMMEDIATE_UNSOLICITED_DATA;
969 969
970 conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt; 970 conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
971 if (hdr->flags & ISCSI_FLAG_CMD_READ) { 971 if (hdr->flags & ISCSI_FLAG_CMD_READ)
972 cmd->targ_xfer_tag = session_get_next_ttt(conn->sess); 972 cmd->targ_xfer_tag = session_get_next_ttt(conn->sess);
973 } else if (hdr->flags & ISCSI_FLAG_CMD_WRITE) 973 else
974 cmd->targ_xfer_tag = 0xFFFFFFFF; 974 cmd->targ_xfer_tag = 0xFFFFFFFF;
975 cmd->cmd_sn = be32_to_cpu(hdr->cmdsn); 975 cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
976 cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn); 976 cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
@@ -3998,7 +3998,13 @@ get_immediate:
3998 } 3998 }
3999 3999
4000transport_err: 4000transport_err:
4001 iscsit_take_action_for_connection_exit(conn); 4001 /*
4002 * Avoid the normal connection failure code-path if this connection
4003 * is still within LOGIN mode, and iscsi_np process context is
4004 * responsible for cleaning up the early connection failure.
4005 */
4006 if (conn->conn_state != TARG_CONN_STATE_IN_LOGIN)
4007 iscsit_take_action_for_connection_exit(conn);
4002out: 4008out:
4003 return 0; 4009 return 0;
4004} 4010}
@@ -4082,7 +4088,7 @@ reject:
4082 4088
4083int iscsi_target_rx_thread(void *arg) 4089int iscsi_target_rx_thread(void *arg)
4084{ 4090{
4085 int ret; 4091 int ret, rc;
4086 u8 buffer[ISCSI_HDR_LEN], opcode; 4092 u8 buffer[ISCSI_HDR_LEN], opcode;
4087 u32 checksum = 0, digest = 0; 4093 u32 checksum = 0, digest = 0;
4088 struct iscsi_conn *conn = arg; 4094 struct iscsi_conn *conn = arg;
@@ -4092,10 +4098,16 @@ int iscsi_target_rx_thread(void *arg)
4092 * connection recovery / failure event can be triggered externally. 4098 * connection recovery / failure event can be triggered externally.
4093 */ 4099 */
4094 allow_signal(SIGINT); 4100 allow_signal(SIGINT);
4101 /*
4102 * Wait for iscsi_post_login_handler() to complete before allowing
4103 * incoming iscsi/tcp socket I/O, and/or failing the connection.
4104 */
4105 rc = wait_for_completion_interruptible(&conn->rx_login_comp);
4106 if (rc < 0)
4107 return 0;
4095 4108
4096 if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) { 4109 if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) {
4097 struct completion comp; 4110 struct completion comp;
4098 int rc;
4099 4111
4100 init_completion(&comp); 4112 init_completion(&comp);
4101 rc = wait_for_completion_interruptible(&comp); 4113 rc = wait_for_completion_interruptible(&comp);
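The rx_login_comp changes here and in iscsi_target_login.c below form a completion handshake: the freshly created RX thread parks until iscsi_post_login_handler() signals that the connection has reached TARG_CONN_STATE_LOGGED_IN. A condensed sketch of the pattern; the struct and function names are placeholders, the completion API calls are the real ones:

#include <linux/completion.h>

struct conn_sketch {
	struct completion rx_login_comp;
	/* ... */
};

/* Connection setup: arm the completion before the thread is created. */
static void conn_init_sketch(struct conn_sketch *conn)
{
	init_completion(&conn->rx_login_comp);
}

/* RX-thread side: do not touch the socket until login post-processing
 * has finished; an interrupting signal simply ends the thread. */
static int rx_thread_sketch(void *arg)
{
	struct conn_sketch *conn = arg;

	if (wait_for_completion_interruptible(&conn->rx_login_comp) < 0)
		return 0;

	/* ... safe to start receiving PDUs here ... */
	return 0;
}

/* Login side: wake the waiting RX thread once logged in. */
static void post_login_sketch(struct conn_sketch *conn)
{
	complete(&conn->rx_login_comp);
}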
@@ -4532,7 +4544,18 @@ static void iscsit_logout_post_handler_closesession(
4532 struct iscsi_conn *conn) 4544 struct iscsi_conn *conn)
4533{ 4545{
4534 struct iscsi_session *sess = conn->sess; 4546 struct iscsi_session *sess = conn->sess;
4535 int sleep = cmpxchg(&conn->tx_thread_active, true, false); 4547 int sleep = 1;
4548 /*
4549 * Traditional iscsi/tcp will invoke this logic from TX thread
4550 * context during session logout, so clear tx_thread_active and
 4551	 * sleep if iscsit_close_connection() has not already occurred.
 4552	 *
 4553	 * Since iser-target invokes this logic from its own workqueue,
4554 * always sleep waiting for RX/TX thread shutdown to complete
4555 * within iscsit_close_connection().
4556 */
4557 if (conn->conn_transport->transport_type == ISCSI_TCP)
4558 sleep = cmpxchg(&conn->tx_thread_active, true, false);
4536 4559
4537 atomic_set(&conn->conn_logout_remove, 0); 4560 atomic_set(&conn->conn_logout_remove, 0);
4538 complete(&conn->conn_logout_comp); 4561 complete(&conn->conn_logout_comp);
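For plain iscsi/tcp the logout post-handlers still use cmpxchg(&conn->tx_thread_active, true, false), which reads and clears the flag in one atomic step so that exactly one of the two racing paths observes it as true and sleeps. A portable stand-alone analogue using C11 atomics (in the boolean case the CAS result equals the old value):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool tx_thread_active = true;

static bool test_and_clear_active(void)
{
	bool expected = true;

	/* Succeeds (and returns true) only if the flag was still set,
	 * clearing it in the same atomic step - the boolean equivalent
	 * of cmpxchg(&conn->tx_thread_active, true, false). */
	return atomic_compare_exchange_strong(&tx_thread_active,
					      &expected, false);
}

int main(void)
{
	printf("first caller sleeps:  %d\n", test_and_clear_active());	/* 1 */
	printf("second caller sleeps: %d\n", test_and_clear_active());	/* 0 */
	return 0;
}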
@@ -4546,7 +4569,10 @@ static void iscsit_logout_post_handler_closesession(
4546static void iscsit_logout_post_handler_samecid( 4569static void iscsit_logout_post_handler_samecid(
4547 struct iscsi_conn *conn) 4570 struct iscsi_conn *conn)
4548{ 4571{
4549 int sleep = cmpxchg(&conn->tx_thread_active, true, false); 4572 int sleep = 1;
4573
4574 if (conn->conn_transport->transport_type == ISCSI_TCP)
4575 sleep = cmpxchg(&conn->tx_thread_active, true, false);
4550 4576
4551 atomic_set(&conn->conn_logout_remove, 0); 4577 atomic_set(&conn->conn_logout_remove, 0);
4552 complete(&conn->conn_logout_comp); 4578 complete(&conn->conn_logout_comp);
@@ -4765,6 +4791,7 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
4765 struct iscsi_session *sess; 4791 struct iscsi_session *sess;
4766 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; 4792 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
4767 struct se_session *se_sess, *se_sess_tmp; 4793 struct se_session *se_sess, *se_sess_tmp;
4794 LIST_HEAD(free_list);
4768 int session_count = 0; 4795 int session_count = 0;
4769 4796
4770 spin_lock_bh(&se_tpg->session_lock); 4797 spin_lock_bh(&se_tpg->session_lock);
@@ -4786,14 +4813,17 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
4786 } 4813 }
4787 atomic_set(&sess->session_reinstatement, 1); 4814 atomic_set(&sess->session_reinstatement, 1);
4788 spin_unlock(&sess->conn_lock); 4815 spin_unlock(&sess->conn_lock);
4789 spin_unlock_bh(&se_tpg->session_lock);
4790 4816
4791 iscsit_free_session(sess); 4817 list_move_tail(&se_sess->sess_list, &free_list);
4792 spin_lock_bh(&se_tpg->session_lock); 4818 }
4819 spin_unlock_bh(&se_tpg->session_lock);
4820
4821 list_for_each_entry_safe(se_sess, se_sess_tmp, &free_list, sess_list) {
4822 sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
4793 4823
4824 iscsit_free_session(sess);
4794 session_count++; 4825 session_count++;
4795 } 4826 }
4796 spin_unlock_bh(&se_tpg->session_lock);
4797 4827
4798 pr_debug("Released %d iSCSI Session(s) from Target Portal" 4828 pr_debug("Released %d iSCSI Session(s) from Target Portal"
4799 " Group: %hu\n", session_count, tpg->tpgt); 4829 " Group: %hu\n", session_count, tpg->tpgt);
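iscsit_release_sessions_for_tpg() no longer drops and retakes the se_tpg session lock around each iscsit_free_session(); it moves every victim onto a local free_list under the lock and tears them down afterwards. The shape of that collect-then-free pattern, reduced to its essentials (the session type and free step are placeholders):

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

struct sess_sketch {
	struct list_head sess_list;
	/* ... */
};

static void release_all_sketch(spinlock_t *lock, struct list_head *active)
{
	struct sess_sketch *sess, *tmp;
	LIST_HEAD(free_list);

	/* Phase 1: pick the victims while holding the lock ... */
	spin_lock_bh(lock);
	list_for_each_entry_safe(sess, tmp, active, sess_list)
		list_move_tail(&sess->sess_list, &free_list);
	spin_unlock_bh(lock);

	/* ... Phase 2: tear them down without the lock, so the (possibly
	 * sleeping) free path can never deadlock against it. */
	list_for_each_entry_safe(sess, tmp, &free_list, sess_list) {
		list_del(&sess->sess_list);
		kfree(sess);
	}
}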
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 3d0fe4ff5590..7e8f65e5448f 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -82,6 +82,7 @@ static struct iscsi_login *iscsi_login_init_conn(struct iscsi_conn *conn)
82 init_completion(&conn->conn_logout_comp); 82 init_completion(&conn->conn_logout_comp);
83 init_completion(&conn->rx_half_close_comp); 83 init_completion(&conn->rx_half_close_comp);
84 init_completion(&conn->tx_half_close_comp); 84 init_completion(&conn->tx_half_close_comp);
85 init_completion(&conn->rx_login_comp);
85 spin_lock_init(&conn->cmd_lock); 86 spin_lock_init(&conn->cmd_lock);
86 spin_lock_init(&conn->conn_usage_lock); 87 spin_lock_init(&conn->conn_usage_lock);
87 spin_lock_init(&conn->immed_queue_lock); 88 spin_lock_init(&conn->immed_queue_lock);
@@ -644,7 +645,7 @@ static void iscsi_post_login_start_timers(struct iscsi_conn *conn)
644 iscsit_start_nopin_timer(conn); 645 iscsit_start_nopin_timer(conn);
645} 646}
646 647
647static int iscsit_start_kthreads(struct iscsi_conn *conn) 648int iscsit_start_kthreads(struct iscsi_conn *conn)
648{ 649{
649 int ret = 0; 650 int ret = 0;
650 651
@@ -679,6 +680,7 @@ static int iscsit_start_kthreads(struct iscsi_conn *conn)
679 680
680 return 0; 681 return 0;
681out_tx: 682out_tx:
683 send_sig(SIGINT, conn->tx_thread, 1);
682 kthread_stop(conn->tx_thread); 684 kthread_stop(conn->tx_thread);
683 conn->tx_thread_active = false; 685 conn->tx_thread_active = false;
684out_bitmap: 686out_bitmap:
@@ -689,7 +691,7 @@ out_bitmap:
689 return ret; 691 return ret;
690} 692}
691 693
692int iscsi_post_login_handler( 694void iscsi_post_login_handler(
693 struct iscsi_np *np, 695 struct iscsi_np *np,
694 struct iscsi_conn *conn, 696 struct iscsi_conn *conn,
695 u8 zero_tsih) 697 u8 zero_tsih)
@@ -699,7 +701,6 @@ int iscsi_post_login_handler(
699 struct se_session *se_sess = sess->se_sess; 701 struct se_session *se_sess = sess->se_sess;
700 struct iscsi_portal_group *tpg = sess->tpg; 702 struct iscsi_portal_group *tpg = sess->tpg;
701 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; 703 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
702 int rc;
703 704
704 iscsit_inc_conn_usage_count(conn); 705 iscsit_inc_conn_usage_count(conn);
705 706
@@ -739,10 +740,6 @@ int iscsi_post_login_handler(
739 sess->sess_ops->InitiatorName); 740 sess->sess_ops->InitiatorName);
740 spin_unlock_bh(&sess->conn_lock); 741 spin_unlock_bh(&sess->conn_lock);
741 742
742 rc = iscsit_start_kthreads(conn);
743 if (rc)
744 return rc;
745
746 iscsi_post_login_start_timers(conn); 743 iscsi_post_login_start_timers(conn);
747 /* 744 /*
748 * Determine CPU mask to ensure connection's RX and TX kthreads 745 * Determine CPU mask to ensure connection's RX and TX kthreads
@@ -751,15 +748,20 @@ int iscsi_post_login_handler(
751 iscsit_thread_get_cpumask(conn); 748 iscsit_thread_get_cpumask(conn);
752 conn->conn_rx_reset_cpumask = 1; 749 conn->conn_rx_reset_cpumask = 1;
753 conn->conn_tx_reset_cpumask = 1; 750 conn->conn_tx_reset_cpumask = 1;
754 751 /*
 752	 * Wake up the sleeping iscsi_target_rx_thread() now that
753 * iscsi_conn is in TARG_CONN_STATE_LOGGED_IN state.
754 */
755 complete(&conn->rx_login_comp);
755 iscsit_dec_conn_usage_count(conn); 756 iscsit_dec_conn_usage_count(conn);
757
756 if (stop_timer) { 758 if (stop_timer) {
757 spin_lock_bh(&se_tpg->session_lock); 759 spin_lock_bh(&se_tpg->session_lock);
758 iscsit_stop_time2retain_timer(sess); 760 iscsit_stop_time2retain_timer(sess);
759 spin_unlock_bh(&se_tpg->session_lock); 761 spin_unlock_bh(&se_tpg->session_lock);
760 } 762 }
761 iscsit_dec_session_usage_count(sess); 763 iscsit_dec_session_usage_count(sess);
762 return 0; 764 return;
763 } 765 }
764 766
765 iscsi_set_session_parameters(sess->sess_ops, conn->param_list, 1); 767 iscsi_set_session_parameters(sess->sess_ops, conn->param_list, 1);
@@ -800,10 +802,6 @@ int iscsi_post_login_handler(
800 " iSCSI Target Portal Group: %hu\n", tpg->nsessions, tpg->tpgt); 802 " iSCSI Target Portal Group: %hu\n", tpg->nsessions, tpg->tpgt);
801 spin_unlock_bh(&se_tpg->session_lock); 803 spin_unlock_bh(&se_tpg->session_lock);
802 804
803 rc = iscsit_start_kthreads(conn);
804 if (rc)
805 return rc;
806
807 iscsi_post_login_start_timers(conn); 805 iscsi_post_login_start_timers(conn);
808 /* 806 /*
809 * Determine CPU mask to ensure connection's RX and TX kthreads 807 * Determine CPU mask to ensure connection's RX and TX kthreads
@@ -812,10 +810,12 @@ int iscsi_post_login_handler(
812 iscsit_thread_get_cpumask(conn); 810 iscsit_thread_get_cpumask(conn);
813 conn->conn_rx_reset_cpumask = 1; 811 conn->conn_rx_reset_cpumask = 1;
814 conn->conn_tx_reset_cpumask = 1; 812 conn->conn_tx_reset_cpumask = 1;
815 813 /*
 814	 * Wake up the sleeping iscsi_target_rx_thread() now that
815 * iscsi_conn is in TARG_CONN_STATE_LOGGED_IN state.
816 */
817 complete(&conn->rx_login_comp);
816 iscsit_dec_conn_usage_count(conn); 818 iscsit_dec_conn_usage_count(conn);
817
818 return 0;
819} 819}
820 820
821static void iscsi_handle_login_thread_timeout(unsigned long data) 821static void iscsi_handle_login_thread_timeout(unsigned long data)
@@ -1380,23 +1380,12 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
1380 if (ret < 0) 1380 if (ret < 0)
1381 goto new_sess_out; 1381 goto new_sess_out;
1382 1382
1383 if (!conn->sess) {
1384 pr_err("struct iscsi_conn session pointer is NULL!\n");
1385 goto new_sess_out;
1386 }
1387
1388 iscsi_stop_login_thread_timer(np); 1383 iscsi_stop_login_thread_timer(np);
1389 1384
1390 if (signal_pending(current))
1391 goto new_sess_out;
1392
1393 if (ret == 1) { 1385 if (ret == 1) {
1394 tpg_np = conn->tpg_np; 1386 tpg_np = conn->tpg_np;
1395 1387
1396 ret = iscsi_post_login_handler(np, conn, zero_tsih); 1388 iscsi_post_login_handler(np, conn, zero_tsih);
1397 if (ret < 0)
1398 goto new_sess_out;
1399
1400 iscsit_deaccess_np(np, tpg, tpg_np); 1389 iscsit_deaccess_np(np, tpg, tpg_np);
1401 } 1390 }
1402 1391
diff --git a/drivers/target/iscsi/iscsi_target_login.h b/drivers/target/iscsi/iscsi_target_login.h
index 1c7358081533..57aa0d0fd820 100644
--- a/drivers/target/iscsi/iscsi_target_login.h
+++ b/drivers/target/iscsi/iscsi_target_login.h
@@ -12,7 +12,8 @@ extern int iscsit_accept_np(struct iscsi_np *, struct iscsi_conn *);
12extern int iscsit_get_login_rx(struct iscsi_conn *, struct iscsi_login *); 12extern int iscsit_get_login_rx(struct iscsi_conn *, struct iscsi_login *);
13extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32); 13extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32);
14extern void iscsit_free_conn(struct iscsi_np *, struct iscsi_conn *); 14extern void iscsit_free_conn(struct iscsi_np *, struct iscsi_conn *);
15extern int iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8); 15extern int iscsit_start_kthreads(struct iscsi_conn *);
16extern void iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8);
16extern void iscsi_target_login_sess_out(struct iscsi_conn *, struct iscsi_np *, 17extern void iscsi_target_login_sess_out(struct iscsi_conn *, struct iscsi_np *,
17 bool, bool); 18 bool, bool);
18extern int iscsi_target_login_thread(void *); 19extern int iscsi_target_login_thread(void *);
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 8c02fa34716f..f9cde9141836 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -17,6 +17,7 @@
17 ******************************************************************************/ 17 ******************************************************************************/
18 18
19#include <linux/ctype.h> 19#include <linux/ctype.h>
20#include <linux/kthread.h>
20#include <scsi/iscsi_proto.h> 21#include <scsi/iscsi_proto.h>
21#include <target/target_core_base.h> 22#include <target/target_core_base.h>
22#include <target/target_core_fabric.h> 23#include <target/target_core_fabric.h>
@@ -361,10 +362,24 @@ static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_log
361 ntohl(login_rsp->statsn), login->rsp_length); 362 ntohl(login_rsp->statsn), login->rsp_length);
362 363
363 padding = ((-login->rsp_length) & 3); 364 padding = ((-login->rsp_length) & 3);
365 /*
366 * Before sending the last login response containing the transition
367 * bit for full-feature-phase, go ahead and start up TX/RX threads
368 * now to avoid potential resource allocation failures after the
369 * final login response has been sent.
370 */
371 if (login->login_complete) {
372 int rc = iscsit_start_kthreads(conn);
373 if (rc) {
374 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
375 ISCSI_LOGIN_STATUS_NO_RESOURCES);
376 return -1;
377 }
378 }
364 379
365 if (conn->conn_transport->iscsit_put_login_tx(conn, login, 380 if (conn->conn_transport->iscsit_put_login_tx(conn, login,
366 login->rsp_length + padding) < 0) 381 login->rsp_length + padding) < 0)
367 return -1; 382 goto err;
368 383
369 login->rsp_length = 0; 384 login->rsp_length = 0;
370 mutex_lock(&sess->cmdsn_mutex); 385 mutex_lock(&sess->cmdsn_mutex);
@@ -373,6 +388,23 @@ static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_log
373 mutex_unlock(&sess->cmdsn_mutex); 388 mutex_unlock(&sess->cmdsn_mutex);
374 389
375 return 0; 390 return 0;
391
392err:
393 if (login->login_complete) {
394 if (conn->rx_thread && conn->rx_thread_active) {
395 send_sig(SIGINT, conn->rx_thread, 1);
396 kthread_stop(conn->rx_thread);
397 }
398 if (conn->tx_thread && conn->tx_thread_active) {
399 send_sig(SIGINT, conn->tx_thread, 1);
400 kthread_stop(conn->tx_thread);
401 }
402 spin_lock(&iscsit_global->ts_bitmap_lock);
403 bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id,
404 get_order(1));
405 spin_unlock(&iscsit_global->ts_bitmap_lock);
406 }
407 return -1;
376} 408}
377 409
378static void iscsi_target_sk_data_ready(struct sock *sk) 410static void iscsi_target_sk_data_ready(struct sock *sk)
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 0b0de3647478..860e84046177 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -457,8 +457,15 @@ void target_unregister_template(const struct target_core_fabric_ops *fo)
457 if (!strcmp(t->tf_ops->name, fo->name)) { 457 if (!strcmp(t->tf_ops->name, fo->name)) {
458 BUG_ON(atomic_read(&t->tf_access_cnt)); 458 BUG_ON(atomic_read(&t->tf_access_cnt));
459 list_del(&t->tf_list); 459 list_del(&t->tf_list);
460 mutex_unlock(&g_tf_lock);
461 /*
462 * Wait for any outstanding fabric se_deve_entry->rcu_head
463 * callbacks to complete post kfree_rcu(), before allowing
464 * fabric driver unload of TFO->module to proceed.
465 */
466 rcu_barrier();
460 kfree(t); 467 kfree(t);
461 break; 468 return;
462 } 469 }
463 } 470 }
464 mutex_unlock(&g_tf_lock); 471 mutex_unlock(&g_tf_lock);
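The rcu_barrier() added before kfree(t) here (and the matching one in target_core_hba.c below) is what makes module unload safe: callbacks queued by kfree_rcu()/call_rcu() may still reference the departing module, and rcu_barrier() waits for every already-queued callback to finish. In miniature, with placeholder types around the real RCU API:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct entry_sketch {
	struct rcu_head rcu_head;
	/* ... data referenced by RCU readers ... */
};

static void retire_entry(struct entry_sketch *e)
{
	/* Defer the free until all current RCU readers are done. */
	kfree_rcu(e, rcu_head);
}

static void unregister_sketch(void *owner_private)
{
	/*
	 * Before tearing down anything the queued callbacks might still
	 * touch (here: owner_private; in the patch: the fabric module),
	 * wait for every callback queued by kfree_rcu()/call_rcu() to
	 * finish executing.
	 */
	rcu_barrier();
	kfree(owner_private);
}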
@@ -747,7 +754,7 @@ static ssize_t store_pi_prot_type(struct se_dev_attrib *da,
747 if (!dev->transport->init_prot || !dev->transport->free_prot) { 754 if (!dev->transport->init_prot || !dev->transport->free_prot) {
748 /* 0 is only allowed value for non-supporting backends */ 755 /* 0 is only allowed value for non-supporting backends */
749 if (flag == 0) 756 if (flag == 0)
750 return 0; 757 return count;
751 758
752 pr_err("DIF protection not supported by backend: %s\n", 759 pr_err("DIF protection not supported by backend: %s\n",
753 dev->transport->name); 760 dev->transport->name);
@@ -1590,9 +1597,9 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
1590 u8 type = 0; 1597 u8 type = 0;
1591 1598
1592 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) 1599 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
1593 return 0; 1600 return count;
1594 if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) 1601 if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
1595 return 0; 1602 return count;
1596 1603
1597 if (dev->export_count) { 1604 if (dev->export_count) {
1598 pr_debug("Unable to process APTPL metadata while" 1605 pr_debug("Unable to process APTPL metadata while"
@@ -1658,22 +1665,32 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
1658 * PR APTPL Metadata for Reservation 1665 * PR APTPL Metadata for Reservation
1659 */ 1666 */
1660 case Opt_res_holder: 1667 case Opt_res_holder:
1661 match_int(args, &arg); 1668 ret = match_int(args, &arg);
1669 if (ret)
1670 goto out;
1662 res_holder = arg; 1671 res_holder = arg;
1663 break; 1672 break;
1664 case Opt_res_type: 1673 case Opt_res_type:
1665 match_int(args, &arg); 1674 ret = match_int(args, &arg);
1675 if (ret)
1676 goto out;
1666 type = (u8)arg; 1677 type = (u8)arg;
1667 break; 1678 break;
1668 case Opt_res_scope: 1679 case Opt_res_scope:
1669 match_int(args, &arg); 1680 ret = match_int(args, &arg);
1681 if (ret)
1682 goto out;
1670 break; 1683 break;
1671 case Opt_res_all_tg_pt: 1684 case Opt_res_all_tg_pt:
1672 match_int(args, &arg); 1685 ret = match_int(args, &arg);
1686 if (ret)
1687 goto out;
1673 all_tg_pt = (int)arg; 1688 all_tg_pt = (int)arg;
1674 break; 1689 break;
1675 case Opt_mapped_lun: 1690 case Opt_mapped_lun:
1676 match_int(args, &arg); 1691 ret = match_int(args, &arg);
1692 if (ret)
1693 goto out;
1677 mapped_lun = (u64)arg; 1694 mapped_lun = (u64)arg;
1678 break; 1695 break;
1679 /* 1696 /*
@@ -1701,14 +1718,20 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
1701 } 1718 }
1702 break; 1719 break;
1703 case Opt_tpgt: 1720 case Opt_tpgt:
1704 match_int(args, &arg); 1721 ret = match_int(args, &arg);
1722 if (ret)
1723 goto out;
1705 tpgt = (u16)arg; 1724 tpgt = (u16)arg;
1706 break; 1725 break;
1707 case Opt_port_rtpi: 1726 case Opt_port_rtpi:
1708 match_int(args, &arg); 1727 ret = match_int(args, &arg);
1728 if (ret)
1729 goto out;
1709 break; 1730 break;
1710 case Opt_target_lun: 1731 case Opt_target_lun:
1711 match_int(args, &arg); 1732 ret = match_int(args, &arg);
1733 if (ret)
1734 goto out;
1712 target_lun = (u64)arg; 1735 target_lun = (u64)arg;
1713 break; 1736 break;
1714 default: 1737 default:
@@ -1985,7 +2008,7 @@ static ssize_t target_core_store_alua_lu_gp(
1985 2008
1986 lu_gp_mem = dev->dev_alua_lu_gp_mem; 2009 lu_gp_mem = dev->dev_alua_lu_gp_mem;
1987 if (!lu_gp_mem) 2010 if (!lu_gp_mem)
1988 return 0; 2011 return count;
1989 2012
1990 if (count > LU_GROUP_NAME_BUF) { 2013 if (count > LU_GROUP_NAME_BUF) {
1991 pr_err("ALUA LU Group Alias too large!\n"); 2014 pr_err("ALUA LU Group Alias too large!\n");
diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c
index 62ea4e8e70a8..be9cefc07407 100644
--- a/drivers/target/target_core_hba.c
+++ b/drivers/target/target_core_hba.c
@@ -84,8 +84,16 @@ void target_backend_unregister(const struct target_backend_ops *ops)
84 list_for_each_entry(tb, &backend_list, list) { 84 list_for_each_entry(tb, &backend_list, list) {
85 if (tb->ops == ops) { 85 if (tb->ops == ops) {
86 list_del(&tb->list); 86 list_del(&tb->list);
87 mutex_unlock(&backend_mutex);
88 /*
89 * Wait for any outstanding backend driver ->rcu_head
90 * callbacks to complete post TBO->free_device() ->
91 * call_rcu(), before allowing backend driver module
92 * unload of target_backend_ops->owner to proceed.
93 */
94 rcu_barrier();
87 kfree(tb); 95 kfree(tb);
88 break; 96 return;
89 } 97 }
90 } 98 }
91 mutex_unlock(&backend_mutex); 99 mutex_unlock(&backend_mutex);
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 0fdbe43b7dad..5ab7100de17e 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -1474,7 +1474,7 @@ core_scsi3_decode_spec_i_port(
1474 LIST_HEAD(tid_dest_list); 1474 LIST_HEAD(tid_dest_list);
1475 struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp; 1475 struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp;
1476 unsigned char *buf, *ptr, proto_ident; 1476 unsigned char *buf, *ptr, proto_ident;
1477 const unsigned char *i_str; 1477 const unsigned char *i_str = NULL;
1478 char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN]; 1478 char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN];
1479 sense_reason_t ret; 1479 sense_reason_t ret;
1480 u32 tpdl, tid_len = 0; 1480 u32 tpdl, tid_len = 0;
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 4703f403f31c..384cf8894411 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -333,6 +333,7 @@ static int rd_configure_device(struct se_device *dev)
333 dev->dev_attrib.hw_block_size = RD_BLOCKSIZE; 333 dev->dev_attrib.hw_block_size = RD_BLOCKSIZE;
334 dev->dev_attrib.hw_max_sectors = UINT_MAX; 334 dev->dev_attrib.hw_max_sectors = UINT_MAX;
335 dev->dev_attrib.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH; 335 dev->dev_attrib.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
336 dev->dev_attrib.is_nonrot = 1;
336 337
337 rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++; 338 rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;
338 339
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index b0744433315a..f87d4cef6d39 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -454,10 +454,17 @@ spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
454 cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE1_PROT) 454 cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE1_PROT)
455 buf[4] = 0x5; 455 buf[4] = 0x5;
456 else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT || 456 else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT ||
457 cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE3_PROT) 457 cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE3_PROT)
458 buf[4] = 0x4; 458 buf[4] = 0x4;
459 } 459 }
460 460
461 /* logical unit supports type 1 and type 3 protection */
462 if ((dev->transport->get_device_type(dev) == TYPE_DISK) &&
463 (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) &&
464 (dev->dev_attrib.pi_prot_type || cmd->se_sess->sess_prot_type)) {
465 buf[4] |= (0x3 << 3);
466 }
467
461 /* Set HEADSUP, ORDSUP, SIMPSUP */ 468 /* Set HEADSUP, ORDSUP, SIMPSUP */
462 buf[5] = 0x07; 469 buf[5] = 0x07;
463 470
@@ -1196,17 +1203,13 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
1196 struct se_dev_entry *deve; 1203 struct se_dev_entry *deve;
1197 struct se_session *sess = cmd->se_sess; 1204 struct se_session *sess = cmd->se_sess;
1198 struct se_node_acl *nacl; 1205 struct se_node_acl *nacl;
1206 struct scsi_lun slun;
1199 unsigned char *buf; 1207 unsigned char *buf;
1200 u32 lun_count = 0, offset = 8; 1208 u32 lun_count = 0, offset = 8;
1201 1209 __be32 len;
1202 if (cmd->data_length < 16) {
1203 pr_warn("REPORT LUNS allocation length %u too small\n",
1204 cmd->data_length);
1205 return TCM_INVALID_CDB_FIELD;
1206 }
1207 1210
1208 buf = transport_kmap_data_sg(cmd); 1211 buf = transport_kmap_data_sg(cmd);
1209 if (!buf) 1212 if (cmd->data_length && !buf)
1210 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1213 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1211 1214
1212 /* 1215 /*
@@ -1214,11 +1217,9 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
1214 * coming via a target_core_mod PASSTHROUGH op, and not through 1217 * coming via a target_core_mod PASSTHROUGH op, and not through
1215 * a $FABRIC_MOD. In that case, report LUN=0 only. 1218 * a $FABRIC_MOD. In that case, report LUN=0 only.
1216 */ 1219 */
1217 if (!sess) { 1220 if (!sess)
1218 int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
1219 lun_count = 1;
1220 goto done; 1221 goto done;
1221 } 1222
1222 nacl = sess->se_node_acl; 1223 nacl = sess->se_node_acl;
1223 1224
1224 rcu_read_lock(); 1225 rcu_read_lock();
@@ -1229,10 +1230,12 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
1229 * See SPC2-R20 7.19. 1230 * See SPC2-R20 7.19.
1230 */ 1231 */
1231 lun_count++; 1232 lun_count++;
1232 if ((offset + 8) > cmd->data_length) 1233 if (offset >= cmd->data_length)
1233 continue; 1234 continue;
1234 1235
1235 int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]); 1236 int_to_scsilun(deve->mapped_lun, &slun);
1237 memcpy(buf + offset, &slun,
1238 min(8u, cmd->data_length - offset));
1236 offset += 8; 1239 offset += 8;
1237 } 1240 }
1238 rcu_read_unlock(); 1241 rcu_read_unlock();
@@ -1241,12 +1244,22 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
1241 * See SPC3 r07, page 159. 1244 * See SPC3 r07, page 159.
1242 */ 1245 */
1243done: 1246done:
1244 lun_count *= 8; 1247 /*
1245 buf[0] = ((lun_count >> 24) & 0xff); 1248 * If no LUNs are accessible, report virtual LUN 0.
1246 buf[1] = ((lun_count >> 16) & 0xff); 1249 */
1247 buf[2] = ((lun_count >> 8) & 0xff); 1250 if (lun_count == 0) {
1248 buf[3] = (lun_count & 0xff); 1251 int_to_scsilun(0, &slun);
1249 transport_kunmap_data_sg(cmd); 1252 if (cmd->data_length > 8)
1253 memcpy(buf + offset, &slun,
1254 min(8u, cmd->data_length - offset));
1255 lun_count = 1;
1256 }
1257
1258 if (buf) {
1259 len = cpu_to_be32(lun_count * 8);
1260 memcpy(buf, &len, min_t(int, sizeof len, cmd->data_length));
1261 transport_kunmap_data_sg(cmd);
1262 }
1250 1263
1251 target_complete_cmd_with_length(cmd, GOOD, 8 + lun_count * 8); 1264 target_complete_cmd_with_length(cmd, GOOD, 8 + lun_count * 8);
1252 return 0; 1265 return 0;
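The rewritten REPORT LUNS path keeps counting LUNs after the initiator's allocation length runs out and bounds each copy with min(8u, cmd->data_length - offset), so a short buffer is never overrun while the header still advertises the full count. A stand-alone sketch of that copy discipline (buffer sizes and record contents are arbitrary):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define RECORD_SIZE 8u

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

/* Copy as many fixed-size records as fit into a caller-sized buffer,
 * but return how many records exist in total. */
static unsigned int fill_report(uint8_t *buf, unsigned int buf_len,
				const uint64_t *records, unsigned int count)
{
	unsigned int offset = 8;	/* room for the length header */
	unsigned int i;

	for (i = 0; i < count; i++) {
		if (offset >= buf_len)
			continue;	/* keep counting, stop copying */
		memcpy(buf + offset, &records[i],
		       min_u(RECORD_SIZE, buf_len - offset));
		offset += RECORD_SIZE;
	}
	return count;
}

int main(void)
{
	uint8_t buf[20] = { 0 };
	uint64_t luns[4] = { 0, 1, 2, 3 };
	unsigned int n = fill_report(buf, sizeof(buf), luns, 4);

	/* Only one full and one partial record fit, but n is still 4. */
	printf("reported %u records into a %zu-byte buffer\n", n, sizeof(buf));
	return 0;
}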
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 6509c61b9648..620dcd405ff6 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -68,7 +68,7 @@ struct power_table {
68 * registered cooling device. 68 * registered cooling device.
69 * @cpufreq_state: integer value representing the current state of cpufreq 69 * @cpufreq_state: integer value representing the current state of cpufreq
70 * cooling devices. 70 * cooling devices.
71 * @cpufreq_val: integer value representing the absolute value of the clipped 71 * @clipped_freq: integer value representing the absolute value of the clipped
72 * frequency. 72 * frequency.
73 * @max_level: maximum cooling level. One less than total number of valid 73 * @max_level: maximum cooling level. One less than total number of valid
74 * cpufreq frequencies. 74 * cpufreq frequencies.
@@ -91,7 +91,7 @@ struct cpufreq_cooling_device {
91 int id; 91 int id;
92 struct thermal_cooling_device *cool_dev; 92 struct thermal_cooling_device *cool_dev;
93 unsigned int cpufreq_state; 93 unsigned int cpufreq_state;
94 unsigned int cpufreq_val; 94 unsigned int clipped_freq;
95 unsigned int max_level; 95 unsigned int max_level;
96 unsigned int *freq_table; /* In descending order */ 96 unsigned int *freq_table; /* In descending order */
97 struct cpumask allowed_cpus; 97 struct cpumask allowed_cpus;
@@ -107,6 +107,9 @@ struct cpufreq_cooling_device {
107static DEFINE_IDR(cpufreq_idr); 107static DEFINE_IDR(cpufreq_idr);
108static DEFINE_MUTEX(cooling_cpufreq_lock); 108static DEFINE_MUTEX(cooling_cpufreq_lock);
109 109
110static unsigned int cpufreq_dev_count;
111
112static DEFINE_MUTEX(cooling_list_lock);
110static LIST_HEAD(cpufreq_dev_list); 113static LIST_HEAD(cpufreq_dev_list);
111 114
112/** 115/**
@@ -185,14 +188,14 @@ unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq)
185{ 188{
186 struct cpufreq_cooling_device *cpufreq_dev; 189 struct cpufreq_cooling_device *cpufreq_dev;
187 190
188 mutex_lock(&cooling_cpufreq_lock); 191 mutex_lock(&cooling_list_lock);
189 list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) { 192 list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
190 if (cpumask_test_cpu(cpu, &cpufreq_dev->allowed_cpus)) { 193 if (cpumask_test_cpu(cpu, &cpufreq_dev->allowed_cpus)) {
191 mutex_unlock(&cooling_cpufreq_lock); 194 mutex_unlock(&cooling_list_lock);
192 return get_level(cpufreq_dev, freq); 195 return get_level(cpufreq_dev, freq);
193 } 196 }
194 } 197 }
195 mutex_unlock(&cooling_cpufreq_lock); 198 mutex_unlock(&cooling_list_lock);
196 199
197 pr_err("%s: cpu:%d not part of any cooling device\n", __func__, cpu); 200 pr_err("%s: cpu:%d not part of any cooling device\n", __func__, cpu);
198 return THERMAL_CSTATE_INVALID; 201 return THERMAL_CSTATE_INVALID;
@@ -215,29 +218,35 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb,
215 unsigned long event, void *data) 218 unsigned long event, void *data)
216{ 219{
217 struct cpufreq_policy *policy = data; 220 struct cpufreq_policy *policy = data;
218 unsigned long max_freq = 0; 221 unsigned long clipped_freq;
219 struct cpufreq_cooling_device *cpufreq_dev; 222 struct cpufreq_cooling_device *cpufreq_dev;
220 223
221 switch (event) { 224 if (event != CPUFREQ_ADJUST)
225 return NOTIFY_DONE;
222 226
223 case CPUFREQ_ADJUST: 227 mutex_lock(&cooling_list_lock);
224 mutex_lock(&cooling_cpufreq_lock); 228 list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
225 list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) { 229 if (!cpumask_test_cpu(policy->cpu, &cpufreq_dev->allowed_cpus))
226 if (!cpumask_test_cpu(policy->cpu, 230 continue;
227 &cpufreq_dev->allowed_cpus))
228 continue;
229 231
230 max_freq = cpufreq_dev->cpufreq_val; 232 /*
233 * policy->max is the maximum allowed frequency defined by user
234 * and clipped_freq is the maximum that thermal constraints
235 * allow.
236 *
237 * If clipped_freq is lower than policy->max, then we need to
238 * readjust policy->max.
239 *
240 * But, if clipped_freq is greater than policy->max, we don't
241 * need to do anything.
242 */
243 clipped_freq = cpufreq_dev->clipped_freq;
231 244
232 if (policy->max != max_freq) 245 if (policy->max > clipped_freq)
233 cpufreq_verify_within_limits(policy, 0, 246 cpufreq_verify_within_limits(policy, 0, clipped_freq);
234 max_freq);
235 }
236 mutex_unlock(&cooling_cpufreq_lock);
237 break; 247 break;
238 default:
239 return NOTIFY_DONE;
240 } 248 }
249 mutex_unlock(&cooling_list_lock);
241 250
242 return NOTIFY_OK; 251 return NOTIFY_OK;
243} 252}
@@ -519,7 +528,7 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
519 528
520 clip_freq = cpufreq_device->freq_table[state]; 529 clip_freq = cpufreq_device->freq_table[state];
521 cpufreq_device->cpufreq_state = state; 530 cpufreq_device->cpufreq_state = state;
522 cpufreq_device->cpufreq_val = clip_freq; 531 cpufreq_device->clipped_freq = clip_freq;
523 532
524 cpufreq_update_policy(cpu); 533 cpufreq_update_policy(cpu);
525 534
@@ -861,17 +870,19 @@ __cpufreq_cooling_register(struct device_node *np,
861 pr_debug("%s: freq:%u KHz\n", __func__, freq); 870 pr_debug("%s: freq:%u KHz\n", __func__, freq);
862 } 871 }
863 872
864 cpufreq_dev->cpufreq_val = cpufreq_dev->freq_table[0]; 873 cpufreq_dev->clipped_freq = cpufreq_dev->freq_table[0];
865 cpufreq_dev->cool_dev = cool_dev; 874 cpufreq_dev->cool_dev = cool_dev;
866 875
867 mutex_lock(&cooling_cpufreq_lock); 876 mutex_lock(&cooling_cpufreq_lock);
868 877
878 mutex_lock(&cooling_list_lock);
879 list_add(&cpufreq_dev->node, &cpufreq_dev_list);
880 mutex_unlock(&cooling_list_lock);
881
869 /* Register the notifier for first cpufreq cooling device */ 882 /* Register the notifier for first cpufreq cooling device */
870 if (list_empty(&cpufreq_dev_list)) 883 if (!cpufreq_dev_count++)
871 cpufreq_register_notifier(&thermal_cpufreq_notifier_block, 884 cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
872 CPUFREQ_POLICY_NOTIFIER); 885 CPUFREQ_POLICY_NOTIFIER);
873 list_add(&cpufreq_dev->node, &cpufreq_dev_list);
874
875 mutex_unlock(&cooling_cpufreq_lock); 886 mutex_unlock(&cooling_cpufreq_lock);
876 887
877 return cool_dev; 888 return cool_dev;
@@ -1013,13 +1024,17 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
1013 return; 1024 return;
1014 1025
1015 cpufreq_dev = cdev->devdata; 1026 cpufreq_dev = cdev->devdata;
1016 mutex_lock(&cooling_cpufreq_lock);
1017 list_del(&cpufreq_dev->node);
1018 1027
1019 /* Unregister the notifier for the last cpufreq cooling device */ 1028 /* Unregister the notifier for the last cpufreq cooling device */
1020 if (list_empty(&cpufreq_dev_list)) 1029 mutex_lock(&cooling_cpufreq_lock);
1030 if (!--cpufreq_dev_count)
1021 cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block, 1031 cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block,
1022 CPUFREQ_POLICY_NOTIFIER); 1032 CPUFREQ_POLICY_NOTIFIER);
1033
1034 mutex_lock(&cooling_list_lock);
1035 list_del(&cpufreq_dev->node);
1036 mutex_unlock(&cooling_list_lock);
1037
1023 mutex_unlock(&cooling_cpufreq_lock); 1038 mutex_unlock(&cooling_cpufreq_lock);
1024 1039
1025 thermal_cooling_device_unregister(cpufreq_dev->cool_dev); 1040 thermal_cooling_device_unregister(cpufreq_dev->cool_dev);
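Besides the clipped_freq rename, cpu_cooling.c puts the device list under its own cooling_list_lock and keys notifier registration off a cpufreq_dev_count, since the device is now added to the list before the registration check runs and list_empty() could no longer serve as a "first device" test. The first/last counting idiom on its own, with the locking elided for brevity:

#include <stdio.h>

static unsigned int dev_count;

static void register_notifier(void)   { printf("notifier registered\n"); }
static void unregister_notifier(void) { printf("notifier unregistered\n"); }

/* Register the shared notifier only when the first device shows up ... */
static void add_device(void)
{
	if (!dev_count++)
		register_notifier();
}

/* ... and drop it only when the last one goes away. */
static void remove_device(void)
{
	if (!--dev_count)
		unregister_notifier();
}

int main(void)
{
	add_device();		/* registers */
	add_device();		/* no-op */
	remove_device();	/* no-op */
	remove_device();	/* unregisters */
	return 0;
}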
diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c
index d5dd357ba57c..b49f97c734d0 100644
--- a/drivers/thermal/hisi_thermal.c
+++ b/drivers/thermal/hisi_thermal.c
@@ -405,7 +405,6 @@ static SIMPLE_DEV_PM_OPS(hisi_thermal_pm_ops,
405static struct platform_driver hisi_thermal_driver = { 405static struct platform_driver hisi_thermal_driver = {
406 .driver = { 406 .driver = {
407 .name = "hisi_thermal", 407 .name = "hisi_thermal",
408 .owner = THIS_MODULE,
409 .pm = &hisi_thermal_pm_ops, 408 .pm = &hisi_thermal_pm_ops,
410 .of_match_table = of_hisi_thermal_match, 409 .of_match_table = of_hisi_thermal_match,
411 }, 410 },
diff --git a/drivers/thermal/power_allocator.c b/drivers/thermal/power_allocator.c
index 4672250b329f..7006860f2f36 100644
--- a/drivers/thermal/power_allocator.c
+++ b/drivers/thermal/power_allocator.c
@@ -229,7 +229,8 @@ static int allocate_power(struct thermal_zone_device *tz,
229 struct thermal_instance *instance; 229 struct thermal_instance *instance;
230 struct power_allocator_params *params = tz->governor_data; 230 struct power_allocator_params *params = tz->governor_data;
231 u32 *req_power, *max_power, *granted_power, *extra_actor_power; 231 u32 *req_power, *max_power, *granted_power, *extra_actor_power;
232 u32 total_req_power, max_allocatable_power; 232 u32 *weighted_req_power;
233 u32 total_req_power, max_allocatable_power, total_weighted_req_power;
233 u32 total_granted_power, power_range; 234 u32 total_granted_power, power_range;
234 int i, num_actors, total_weight, ret = 0; 235 int i, num_actors, total_weight, ret = 0;
235 int trip_max_desired_temperature = params->trip_max_desired_temperature; 236 int trip_max_desired_temperature = params->trip_max_desired_temperature;
@@ -247,16 +248,17 @@ static int allocate_power(struct thermal_zone_device *tz,
247 } 248 }
248 249
249 /* 250 /*
250 * We need to allocate three arrays of the same size: 251 * We need to allocate five arrays of the same size:
251 * req_power, max_power and granted_power. They are going to 252 * req_power, max_power, granted_power, extra_actor_power and
252 * be needed until this function returns. Allocate them all 253 * weighted_req_power. They are going to be needed until this
253 * in one go to simplify the allocation and deallocation 254 * function returns. Allocate them all in one go to simplify
254 * logic. 255 * the allocation and deallocation logic.
255 */ 256 */
256 BUILD_BUG_ON(sizeof(*req_power) != sizeof(*max_power)); 257 BUILD_BUG_ON(sizeof(*req_power) != sizeof(*max_power));
257 BUILD_BUG_ON(sizeof(*req_power) != sizeof(*granted_power)); 258 BUILD_BUG_ON(sizeof(*req_power) != sizeof(*granted_power));
258 BUILD_BUG_ON(sizeof(*req_power) != sizeof(*extra_actor_power)); 259 BUILD_BUG_ON(sizeof(*req_power) != sizeof(*extra_actor_power));
259 req_power = devm_kcalloc(&tz->device, num_actors * 4, 260 BUILD_BUG_ON(sizeof(*req_power) != sizeof(*weighted_req_power));
261 req_power = devm_kcalloc(&tz->device, num_actors * 5,
260 sizeof(*req_power), GFP_KERNEL); 262 sizeof(*req_power), GFP_KERNEL);
261 if (!req_power) { 263 if (!req_power) {
262 ret = -ENOMEM; 264 ret = -ENOMEM;
@@ -266,8 +268,10 @@ static int allocate_power(struct thermal_zone_device *tz,
266 max_power = &req_power[num_actors]; 268 max_power = &req_power[num_actors];
267 granted_power = &req_power[2 * num_actors]; 269 granted_power = &req_power[2 * num_actors];
268 extra_actor_power = &req_power[3 * num_actors]; 270 extra_actor_power = &req_power[3 * num_actors];
271 weighted_req_power = &req_power[4 * num_actors];
269 272
270 i = 0; 273 i = 0;
274 total_weighted_req_power = 0;
271 total_req_power = 0; 275 total_req_power = 0;
272 max_allocatable_power = 0; 276 max_allocatable_power = 0;
273 277
@@ -289,13 +293,14 @@ static int allocate_power(struct thermal_zone_device *tz,
289 else 293 else
290 weight = instance->weight; 294 weight = instance->weight;
291 295
292 req_power[i] = frac_to_int(weight * req_power[i]); 296 weighted_req_power[i] = frac_to_int(weight * req_power[i]);
293 297
294 if (power_actor_get_max_power(cdev, tz, &max_power[i])) 298 if (power_actor_get_max_power(cdev, tz, &max_power[i]))
295 continue; 299 continue;
296 300
297 total_req_power += req_power[i]; 301 total_req_power += req_power[i];
298 max_allocatable_power += max_power[i]; 302 max_allocatable_power += max_power[i];
303 total_weighted_req_power += weighted_req_power[i];
299 304
300 i++; 305 i++;
301 } 306 }
@@ -303,8 +308,9 @@ static int allocate_power(struct thermal_zone_device *tz,
303 power_range = pid_controller(tz, current_temp, control_temp, 308 power_range = pid_controller(tz, current_temp, control_temp,
304 max_allocatable_power); 309 max_allocatable_power);
305 310
306 divvy_up_power(req_power, max_power, num_actors, total_req_power, 311 divvy_up_power(weighted_req_power, max_power, num_actors,
307 power_range, granted_power, extra_actor_power); 312 total_weighted_req_power, power_range, granted_power,
313 extra_actor_power);
308 314
309 total_granted_power = 0; 315 total_granted_power = 0;
310 i = 0; 316 i = 0;
@@ -328,7 +334,7 @@ static int allocate_power(struct thermal_zone_device *tz,
328 max_allocatable_power, current_temp, 334 max_allocatable_power, current_temp,
329 (s32)control_temp - (s32)current_temp); 335 (s32)control_temp - (s32)current_temp);
330 336
331 devm_kfree(&tz->device, req_power); 337 kfree(req_power);
332unlock: 338unlock:
333 mutex_unlock(&tz->lock); 339 mutex_unlock(&tz->lock);
334 340
@@ -420,7 +426,7 @@ static int power_allocator_bind(struct thermal_zone_device *tz)
420 return -EINVAL; 426 return -EINVAL;
421 } 427 }
422 428
423 params = devm_kzalloc(&tz->device, sizeof(*params), GFP_KERNEL); 429 params = kzalloc(sizeof(*params), GFP_KERNEL);
424 if (!params) 430 if (!params)
425 return -ENOMEM; 431 return -ENOMEM;
426 432
@@ -462,14 +468,14 @@ static int power_allocator_bind(struct thermal_zone_device *tz)
462 return 0; 468 return 0;
463 469
464free: 470free:
465 devm_kfree(&tz->device, params); 471 kfree(params);
466 return ret; 472 return ret;
467} 473}
468 474
469static void power_allocator_unbind(struct thermal_zone_device *tz) 475static void power_allocator_unbind(struct thermal_zone_device *tz)
470{ 476{
471 dev_dbg(&tz->device, "Unbinding from thermal zone %d\n", tz->id); 477 dev_dbg(&tz->device, "Unbinding from thermal zone %d\n", tz->id);
472 devm_kfree(&tz->device, tz->governor_data); 478 kfree(tz->governor_data);
473 tz->governor_data = NULL; 479 tz->governor_data = NULL;
474} 480}
475 481
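
Editor's note on the power_allocator hunks above: allocate_power() now carves five equally sized views (req_power, max_power, granted_power, extra_actor_power and the new weighted_req_power) out of a single allocation of num_actors * 5 elements, and the bind/unbind paths switch from devm_* to plain kzalloc()/kfree(), presumably because these buffers live only for a bind or a single throttle pass rather than for the device lifetime. A minimal userspace sketch of the one-allocation-many-views pattern, using calloc()/free() in place of the kernel allocators:

    #include <stdlib.h>

    /* One allocation, five views of num_actors elements each, mirroring the
     * layout built in allocate_power(). */
    static int alloc_views(size_t num_actors)
    {
        unsigned int *req_power, *max_power, *granted_power;
        unsigned int *extra_actor_power, *weighted_req_power;

        req_power = calloc(num_actors * 5, sizeof(*req_power));
        if (!req_power)
            return -1;                          /* -ENOMEM in the governor */

        max_power          = &req_power[1 * num_actors];
        granted_power      = &req_power[2 * num_actors];
        extra_actor_power  = &req_power[3 * num_actors];
        weighted_req_power = &req_power[4 * num_actors];

        /* ... fill the views, run the PID loop, divvy up power ... */
        (void)max_power; (void)granted_power;
        (void)extra_actor_power; (void)weighted_req_power;

        free(req_power);                        /* one free releases all views */
        return 0;
    }
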
diff --git a/drivers/thermal/samsung/Kconfig b/drivers/thermal/samsung/Kconfig
index c8e35c1a43dc..e0da3865e060 100644
--- a/drivers/thermal/samsung/Kconfig
+++ b/drivers/thermal/samsung/Kconfig
@@ -1,6 +1,6 @@
1config EXYNOS_THERMAL 1config EXYNOS_THERMAL
2 tristate "Exynos thermal management unit driver" 2 tristate "Exynos thermal management unit driver"
3 depends on OF 3 depends on THERMAL_OF
4 help 4 help
5 If you say yes here you get support for the TMU (Thermal Management 5 If you say yes here you get support for the TMU (Thermal Management
6 Unit) driver for SAMSUNG EXYNOS series of SoCs. This driver initialises 6 Unit) driver for SAMSUNG EXYNOS series of SoCs. This driver initialises
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index 531f4b179871..c96ff10b869e 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -1296,7 +1296,6 @@ static struct thermal_zone_of_device_ops exynos_sensor_ops = {
1296 1296
1297static int exynos_tmu_probe(struct platform_device *pdev) 1297static int exynos_tmu_probe(struct platform_device *pdev)
1298{ 1298{
1299 struct exynos_tmu_platform_data *pdata;
1300 struct exynos_tmu_data *data; 1299 struct exynos_tmu_data *data;
1301 int ret; 1300 int ret;
1302 1301
@@ -1318,8 +1317,6 @@ static int exynos_tmu_probe(struct platform_device *pdev)
1318 if (ret) 1317 if (ret)
1319 goto err_sensor; 1318 goto err_sensor;
1320 1319
1321 pdata = data->pdata;
1322
1323 INIT_WORK(&data->irq_work, exynos_tmu_work); 1320 INIT_WORK(&data->irq_work, exynos_tmu_work);
1324 1321
1325 data->clk = devm_clk_get(&pdev->dev, "tmu_apbif"); 1322 data->clk = devm_clk_get(&pdev->dev, "tmu_apbif");
@@ -1392,6 +1389,8 @@ err_clk_sec:
1392 if (!IS_ERR(data->clk_sec)) 1389 if (!IS_ERR(data->clk_sec))
1393 clk_unprepare(data->clk_sec); 1390 clk_unprepare(data->clk_sec);
1394err_sensor: 1391err_sensor:
1392 if (!IS_ERR_OR_NULL(data->regulator))
1393 regulator_disable(data->regulator);
1395 thermal_zone_of_sensor_unregister(&pdev->dev, data->tzd); 1394 thermal_zone_of_sensor_unregister(&pdev->dev, data->tzd);
1396 1395
1397 return ret; 1396 return ret;
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 04659bfb888b..4ca211be4c0f 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -1333,6 +1333,7 @@ int thermal_zone_unbind_cooling_device(struct thermal_zone_device *tz,
1333 return -ENODEV; 1333 return -ENODEV;
1334 1334
1335unbind: 1335unbind:
1336 device_remove_file(&tz->device, &pos->weight_attr);
1336 device_remove_file(&tz->device, &pos->attr); 1337 device_remove_file(&tz->device, &pos->attr);
1337 sysfs_remove_link(&tz->device.kobj, pos->name); 1338 sysfs_remove_link(&tz->device.kobj, pos->name);
1338 release_idr(&tz->idr, &tz->lock, pos->id); 1339 release_idr(&tz->idr, &tz->lock, pos->id);
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index c9c27f69e101..ee8bfacf2071 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -1108,19 +1108,29 @@ static void eraser(unsigned char c, struct tty_struct *tty)
1108 * Locking: ctrl_lock 1108 * Locking: ctrl_lock
1109 */ 1109 */
1110 1110
1111static void isig(int sig, struct tty_struct *tty) 1111static void __isig(int sig, struct tty_struct *tty)
1112{ 1112{
1113 struct n_tty_data *ldata = tty->disc_data;
1114 struct pid *tty_pgrp = tty_get_pgrp(tty); 1113 struct pid *tty_pgrp = tty_get_pgrp(tty);
1115 if (tty_pgrp) { 1114 if (tty_pgrp) {
1116 kill_pgrp(tty_pgrp, sig, 1); 1115 kill_pgrp(tty_pgrp, sig, 1);
1117 put_pid(tty_pgrp); 1116 put_pid(tty_pgrp);
1118 } 1117 }
1118}
1119 1119
1120 if (!L_NOFLSH(tty)) { 1120static void isig(int sig, struct tty_struct *tty)
1121{
1122 struct n_tty_data *ldata = tty->disc_data;
1123
1124 if (L_NOFLSH(tty)) {
1125 /* signal only */
1126 __isig(sig, tty);
1127
1128 } else { /* signal and flush */
1121 up_read(&tty->termios_rwsem); 1129 up_read(&tty->termios_rwsem);
1122 down_write(&tty->termios_rwsem); 1130 down_write(&tty->termios_rwsem);
1123 1131
1132 __isig(sig, tty);
1133
1124 /* clear echo buffer */ 1134 /* clear echo buffer */
1125 mutex_lock(&ldata->output_lock); 1135 mutex_lock(&ldata->output_lock);
1126 ldata->echo_head = ldata->echo_tail = 0; 1136 ldata->echo_head = ldata->echo_tail = 0;
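
Editor's note: the n_tty hunk splits the old isig() into __isig(), which only signals the foreground process group, and a new isig() that either signals (L_NOFLSH set) or signals and flushes, trading the read-held termios_rwsem for a write hold around the flush. There is no atomic read-to-write upgrade, so the lock is dropped and retaken; a small pthread sketch of that shape (illustrative only, not the kernel rwsem API):

    #include <pthread.h>

    static pthread_rwlock_t termios_lock = PTHREAD_RWLOCK_INITIALIZER;

    static void signal_only(void)
    {
        /* __isig() analogue: deliver the signal, touch no shared state */
    }

    /* Caller holds the read lock, as n_tty does for termios_rwsem. */
    static void signal_and_flush(void)
    {
        pthread_rwlock_unlock(&termios_lock);   /* up_read()             */
        pthread_rwlock_wrlock(&termios_lock);   /* down_write()          */

        signal_only();
        /* ... flush echo/read buffers under the write lock ... */

        pthread_rwlock_unlock(&termios_lock);   /* back to the caller's  */
        pthread_rwlock_rdlock(&termios_lock);   /* read-held state       */
    }
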
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 76e65b714471..15b4079a335e 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -1185,7 +1185,7 @@ config SERIAL_SC16IS7XX_CORE
1185config SERIAL_SC16IS7XX 1185config SERIAL_SC16IS7XX
1186 tristate "SC16IS7xx serial support" 1186 tristate "SC16IS7xx serial support"
1187 select SERIAL_CORE 1187 select SERIAL_CORE
1188 depends on I2C || SPI_MASTER 1188 depends on (SPI_MASTER && !I2C) || I2C
1189 help 1189 help
1190 This selects support for SC16IS7xx serial ports. 1190 This selects support for SC16IS7xx serial ports.
1191 Supported ICs are SC16IS740, SC16IS741, SC16IS750, SC16IS752, 1191 Supported ICs are SC16IS740, SC16IS741, SC16IS750, SC16IS752,
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 50cf5b10ceed..fd27e986b1dd 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -2310,8 +2310,8 @@ static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap,
2310 void __iomem *base; 2310 void __iomem *base;
2311 2311
2312 base = devm_ioremap_resource(dev, mmiobase); 2312 base = devm_ioremap_resource(dev, mmiobase);
2313 if (!base) 2313 if (IS_ERR(base))
2314 return -ENOMEM; 2314 return PTR_ERR(base);
2315 2315
2316 index = pl011_probe_dt_alias(index, dev); 2316 index = pl011_probe_dt_alias(index, dev);
2317 2317
diff --git a/drivers/tty/serial/etraxfs-uart.c b/drivers/tty/serial/etraxfs-uart.c
index a57301a6fe42..679709f51fd4 100644
--- a/drivers/tty/serial/etraxfs-uart.c
+++ b/drivers/tty/serial/etraxfs-uart.c
@@ -950,7 +950,7 @@ static int etraxfs_uart_remove(struct platform_device *pdev)
950 950
951 port = platform_get_drvdata(pdev); 951 port = platform_get_drvdata(pdev);
952 uart_remove_one_port(&etraxfs_uart_driver, port); 952 uart_remove_one_port(&etraxfs_uart_driver, port);
953 etraxfs_uart_ports[pdev->id] = NULL; 953 etraxfs_uart_ports[port->line] = NULL;
954 954
955 return 0; 955 return 0;
956} 956}
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 2c90dc31bfaa..54fdc7866ea1 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -1121,11 +1121,6 @@ static int imx_startup(struct uart_port *port)
1121 1121
1122 writel(temp & ~UCR4_DREN, sport->port.membase + UCR4); 1122 writel(temp & ~UCR4_DREN, sport->port.membase + UCR4);
1123 1123
1124 /* Can we enable the DMA support? */
1125 if (is_imx6q_uart(sport) && !uart_console(port) &&
1126 !sport->dma_is_inited)
1127 imx_uart_dma_init(sport);
1128
1129 spin_lock_irqsave(&sport->port.lock, flags); 1124 spin_lock_irqsave(&sport->port.lock, flags);
1130 /* Reset fifo's and state machines */ 1125 /* Reset fifo's and state machines */
1131 i = 100; 1126 i = 100;
@@ -1143,9 +1138,6 @@ static int imx_startup(struct uart_port *port)
1143 writel(USR1_RTSD, sport->port.membase + USR1); 1138 writel(USR1_RTSD, sport->port.membase + USR1);
1144 writel(USR2_ORE, sport->port.membase + USR2); 1139 writel(USR2_ORE, sport->port.membase + USR2);
1145 1140
1146 if (sport->dma_is_inited && !sport->dma_is_enabled)
1147 imx_enable_dma(sport);
1148
1149 temp = readl(sport->port.membase + UCR1); 1141 temp = readl(sport->port.membase + UCR1);
1150 temp |= UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN; 1142 temp |= UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN;
1151 1143
@@ -1316,6 +1308,11 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
1316 } else { 1308 } else {
1317 ucr2 |= UCR2_CTSC; 1309 ucr2 |= UCR2_CTSC;
1318 } 1310 }
1311
1312 /* Can we enable the DMA support? */
1313 if (is_imx6q_uart(sport) && !uart_console(port)
1314 && !sport->dma_is_inited)
1315 imx_uart_dma_init(sport);
1319 } else { 1316 } else {
1320 termios->c_cflag &= ~CRTSCTS; 1317 termios->c_cflag &= ~CRTSCTS;
1321 } 1318 }
@@ -1432,6 +1429,8 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
1432 if (UART_ENABLE_MS(&sport->port, termios->c_cflag)) 1429 if (UART_ENABLE_MS(&sport->port, termios->c_cflag))
1433 imx_enable_ms(&sport->port); 1430 imx_enable_ms(&sport->port);
1434 1431
1432 if (sport->dma_is_inited && !sport->dma_is_enabled)
1433 imx_enable_dma(sport);
1435 spin_unlock_irqrestore(&sport->port.lock, flags); 1434 spin_unlock_irqrestore(&sport->port.lock, flags);
1436} 1435}
1437 1436
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index 9e6576004a42..5ccc698cbbfa 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -354,6 +354,26 @@ static void sc16is7xx_port_write(struct uart_port *port, u8 reg, u8 val)
354 (reg << SC16IS7XX_REG_SHIFT) | port->line, val); 354 (reg << SC16IS7XX_REG_SHIFT) | port->line, val);
355} 355}
356 356
357static void sc16is7xx_fifo_read(struct uart_port *port, unsigned int rxlen)
358{
359 struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
360 u8 addr = (SC16IS7XX_RHR_REG << SC16IS7XX_REG_SHIFT) | port->line;
361
362 regcache_cache_bypass(s->regmap, true);
363 regmap_raw_read(s->regmap, addr, s->buf, rxlen);
364 regcache_cache_bypass(s->regmap, false);
365}
366
367static void sc16is7xx_fifo_write(struct uart_port *port, u8 to_send)
368{
369 struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
370 u8 addr = (SC16IS7XX_THR_REG << SC16IS7XX_REG_SHIFT) | port->line;
371
372 regcache_cache_bypass(s->regmap, true);
373 regmap_raw_write(s->regmap, addr, s->buf, to_send);
374 regcache_cache_bypass(s->regmap, false);
375}
376
357static void sc16is7xx_port_update(struct uart_port *port, u8 reg, 377static void sc16is7xx_port_update(struct uart_port *port, u8 reg,
358 u8 mask, u8 val) 378 u8 mask, u8 val)
359{ 379{
@@ -508,10 +528,7 @@ static void sc16is7xx_handle_rx(struct uart_port *port, unsigned int rxlen,
508 s->buf[0] = sc16is7xx_port_read(port, SC16IS7XX_RHR_REG); 528 s->buf[0] = sc16is7xx_port_read(port, SC16IS7XX_RHR_REG);
509 bytes_read = 1; 529 bytes_read = 1;
510 } else { 530 } else {
511 regcache_cache_bypass(s->regmap, true); 531 sc16is7xx_fifo_read(port, rxlen);
512 regmap_raw_read(s->regmap, SC16IS7XX_RHR_REG,
513 s->buf, rxlen);
514 regcache_cache_bypass(s->regmap, false);
515 bytes_read = rxlen; 532 bytes_read = rxlen;
516 } 533 }
517 534
@@ -591,9 +608,8 @@ static void sc16is7xx_handle_tx(struct uart_port *port)
591 s->buf[i] = xmit->buf[xmit->tail]; 608 s->buf[i] = xmit->buf[xmit->tail];
592 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); 609 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
593 } 610 }
594 regcache_cache_bypass(s->regmap, true); 611
595 regmap_raw_write(s->regmap, SC16IS7XX_THR_REG, s->buf, to_send); 612 sc16is7xx_fifo_write(port, to_send);
596 regcache_cache_bypass(s->regmap, false);
597 } 613 }
598 614
599 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) 615 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
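
Editor's note: the sc16is7xx hunks factor the cache-bypassed FIFO bursts into sc16is7xx_fifo_read()/_write() and, unlike the old inline regmap_raw_read()/_write() calls, build the regmap address from both the register and port->line, so each UART of a dual-port part bursts to its own FIFO. A tiny illustration of the composite addressing (the shift value below is made up for the example, not taken from the driver):

    #include <stdio.h>

    #define RHR_REG   0x00      /* receive holding register (illustrative) */
    #define REG_SHIFT 2         /* illustrative shift, not the driver's    */

    int main(void)
    {
        for (unsigned int line = 0; line < 2; line++)
            printf("RHR burst for port %u -> regmap address 0x%02x\n",
                   line, (RHR_REG << REG_SHIFT) | line);
        return 0;
    }
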
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 7ae1592f7ec9..f36852067f20 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -1418,7 +1418,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
1418 mutex_lock(&port->mutex); 1418 mutex_lock(&port->mutex);
1419 uart_shutdown(tty, state); 1419 uart_shutdown(tty, state);
1420 tty_port_tty_set(port, NULL); 1420 tty_port_tty_set(port, NULL);
1421 tty->closing = 0; 1421
1422 spin_lock_irqsave(&port->lock, flags); 1422 spin_lock_irqsave(&port->lock, flags);
1423 1423
1424 if (port->blocked_open) { 1424 if (port->blocked_open) {
@@ -1444,6 +1444,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
1444 mutex_unlock(&port->mutex); 1444 mutex_unlock(&port->mutex);
1445 1445
1446 tty_ldisc_flush(tty); 1446 tty_ldisc_flush(tty);
1447 tty->closing = 0;
1447} 1448}
1448 1449
1449static void uart_wait_until_sent(struct tty_struct *tty, int timeout) 1450static void uart_wait_until_sent(struct tty_struct *tty, int timeout)
diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c
index ea27804d87af..381a2b13682c 100644
--- a/drivers/tty/vt/selection.c
+++ b/drivers/tty/vt/selection.c
@@ -356,6 +356,7 @@ int paste_selection(struct tty_struct *tty)
356 schedule(); 356 schedule();
357 continue; 357 continue;
358 } 358 }
359 __set_current_state(TASK_RUNNING);
359 count = sel_buffer_lth - pasted; 360 count = sel_buffer_lth - pasted;
360 count = tty_ldisc_receive_buf(ld, sel_buffer + pasted, NULL, 361 count = tty_ldisc_receive_buf(ld, sel_buffer + pasted, NULL,
361 count); 362 count);
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 8fe52989b380..4462d167900c 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -742,6 +742,8 @@ static void visual_init(struct vc_data *vc, int num, int init)
742 __module_get(vc->vc_sw->owner); 742 __module_get(vc->vc_sw->owner);
743 vc->vc_num = num; 743 vc->vc_num = num;
744 vc->vc_display_fg = &master_display_fg; 744 vc->vc_display_fg = &master_display_fg;
745 if (vc->vc_uni_pagedir_loc)
746 con_free_unimap(vc);
745 vc->vc_uni_pagedir_loc = &vc->vc_uni_pagedir; 747 vc->vc_uni_pagedir_loc = &vc->vc_uni_pagedir;
746 vc->vc_uni_pagedir = NULL; 748 vc->vc_uni_pagedir = NULL;
747 vc->vc_hi_font_mask = 0; 749 vc->vc_hi_font_mask = 0;
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 74fea4fa41b1..3ad48e1c0c57 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -1024,7 +1024,18 @@ static struct platform_driver ci_hdrc_driver = {
1024 }, 1024 },
1025}; 1025};
1026 1026
1027module_platform_driver(ci_hdrc_driver); 1027static int __init ci_hdrc_platform_register(void)
1028{
1029 ci_hdrc_host_driver_init();
1030 return platform_driver_register(&ci_hdrc_driver);
1031}
1032module_init(ci_hdrc_platform_register);
1033
1034static void __exit ci_hdrc_platform_unregister(void)
1035{
1036 platform_driver_unregister(&ci_hdrc_driver);
1037}
1038module_exit(ci_hdrc_platform_unregister);
1028 1039
1029MODULE_ALIAS("platform:ci_hdrc"); 1040MODULE_ALIAS("platform:ci_hdrc");
1030MODULE_LICENSE("GPL v2"); 1041MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
index 6cf87b8b13a8..7161439def19 100644
--- a/drivers/usb/chipidea/host.c
+++ b/drivers/usb/chipidea/host.c
@@ -249,9 +249,12 @@ int ci_hdrc_host_init(struct ci_hdrc *ci)
249 rdrv->name = "host"; 249 rdrv->name = "host";
250 ci->roles[CI_ROLE_HOST] = rdrv; 250 ci->roles[CI_ROLE_HOST] = rdrv;
251 251
252 return 0;
253}
254
255void ci_hdrc_host_driver_init(void)
256{
252 ehci_init_driver(&ci_ehci_hc_driver, &ehci_ci_overrides); 257 ehci_init_driver(&ci_ehci_hc_driver, &ehci_ci_overrides);
253 orig_bus_suspend = ci_ehci_hc_driver.bus_suspend; 258 orig_bus_suspend = ci_ehci_hc_driver.bus_suspend;
254 ci_ehci_hc_driver.bus_suspend = ci_ehci_bus_suspend; 259 ci_ehci_hc_driver.bus_suspend = ci_ehci_bus_suspend;
255
256 return 0;
257} 260}
diff --git a/drivers/usb/chipidea/host.h b/drivers/usb/chipidea/host.h
index 5707bf379bfb..0f12f131bdd3 100644
--- a/drivers/usb/chipidea/host.h
+++ b/drivers/usb/chipidea/host.h
@@ -5,6 +5,7 @@
5 5
6int ci_hdrc_host_init(struct ci_hdrc *ci); 6int ci_hdrc_host_init(struct ci_hdrc *ci);
7void ci_hdrc_host_destroy(struct ci_hdrc *ci); 7void ci_hdrc_host_destroy(struct ci_hdrc *ci);
8void ci_hdrc_host_driver_init(void);
8 9
9#else 10#else
10 11
@@ -18,6 +19,11 @@ static inline void ci_hdrc_host_destroy(struct ci_hdrc *ci)
18 19
19} 20}
20 21
22static void ci_hdrc_host_driver_init(void)
23{
24
25}
26
21#endif 27#endif
22 28
23#endif /* __DRIVERS_USB_CHIPIDEA_HOST_H */ 29#endif /* __DRIVERS_USB_CHIPIDEA_HOST_H */
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 519a77ba214c..b30e7423549b 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1944,6 +1944,7 @@ static void __exit acm_exit(void)
1944 usb_deregister(&acm_driver); 1944 usb_deregister(&acm_driver);
1945 tty_unregister_driver(acm_tty_driver); 1945 tty_unregister_driver(acm_tty_driver);
1946 put_tty_driver(acm_tty_driver); 1946 put_tty_driver(acm_tty_driver);
1947 idr_destroy(&acm_minors);
1947} 1948}
1948 1949
1949module_init(acm_init); 1950module_init(acm_init);
diff --git a/drivers/usb/common/ulpi.c b/drivers/usb/common/ulpi.c
index 0e6f968e93fe..01c0c0477a9e 100644
--- a/drivers/usb/common/ulpi.c
+++ b/drivers/usb/common/ulpi.c
@@ -242,7 +242,7 @@ static int __init ulpi_init(void)
242{ 242{
243 return bus_register(&ulpi_bus); 243 return bus_register(&ulpi_bus);
244} 244}
245module_init(ulpi_init); 245subsys_initcall(ulpi_init);
246 246
247static void __exit ulpi_exit(void) 247static void __exit ulpi_exit(void)
248{ 248{
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index be5b2074f906..cbcd0920fb51 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1022,9 +1022,12 @@ static int register_root_hub(struct usb_hcd *hcd)
1022 dev_name(&usb_dev->dev), retval); 1022 dev_name(&usb_dev->dev), retval);
1023 return (retval < 0) ? retval : -EMSGSIZE; 1023 return (retval < 0) ? retval : -EMSGSIZE;
1024 } 1024 }
1025 if (usb_dev->speed == USB_SPEED_SUPER) { 1025
1026 if (le16_to_cpu(usb_dev->descriptor.bcdUSB) >= 0x0201) {
1026 retval = usb_get_bos_descriptor(usb_dev); 1027 retval = usb_get_bos_descriptor(usb_dev);
1027 if (retval < 0) { 1028 if (!retval) {
1029 usb_dev->lpm_capable = usb_device_supports_lpm(usb_dev);
1030 } else if (usb_dev->speed == USB_SPEED_SUPER) {
1028 mutex_unlock(&usb_bus_list_lock); 1031 mutex_unlock(&usb_bus_list_lock);
1029 dev_dbg(parent_dev, "can't read %s bos descriptor %d\n", 1032 dev_dbg(parent_dev, "can't read %s bos descriptor %d\n",
1030 dev_name(&usb_dev->dev), retval); 1033 dev_name(&usb_dev->dev), retval);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 43cb2f2e3b43..73dfa194160b 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -122,7 +122,7 @@ struct usb_hub *usb_hub_to_struct_hub(struct usb_device *hdev)
122 return usb_get_intfdata(hdev->actconfig->interface[0]); 122 return usb_get_intfdata(hdev->actconfig->interface[0]);
123} 123}
124 124
125static int usb_device_supports_lpm(struct usb_device *udev) 125int usb_device_supports_lpm(struct usb_device *udev)
126{ 126{
127 /* USB 2.1 (and greater) devices indicate LPM support through 127 /* USB 2.1 (and greater) devices indicate LPM support through
128 * their USB 2.0 Extended Capabilities BOS descriptor. 128 * their USB 2.0 Extended Capabilities BOS descriptor.
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index 7eb1e26798e5..457255a3306a 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -65,6 +65,7 @@ extern int usb_hub_init(void);
65extern void usb_hub_cleanup(void); 65extern void usb_hub_cleanup(void);
66extern int usb_major_init(void); 66extern int usb_major_init(void);
67extern void usb_major_cleanup(void); 67extern void usb_major_cleanup(void);
68extern int usb_device_supports_lpm(struct usb_device *udev);
68 69
69#ifdef CONFIG_PM 70#ifdef CONFIG_PM
70 71
diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c
index e5b546f1152e..c3cc1a78d1e2 100644
--- a/drivers/usb/dwc2/core.c
+++ b/drivers/usb/dwc2/core.c
@@ -72,17 +72,7 @@ static int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg)
72 dev_dbg(hsotg->dev, "%s\n", __func__); 72 dev_dbg(hsotg->dev, "%s\n", __func__);
73 73
74 /* Backup Host regs */ 74 /* Backup Host regs */
75 hr = hsotg->hr_backup; 75 hr = &hsotg->hr_backup;
76 if (!hr) {
77 hr = devm_kzalloc(hsotg->dev, sizeof(*hr), GFP_KERNEL);
78 if (!hr) {
79 dev_err(hsotg->dev, "%s: can't allocate host regs\n",
80 __func__);
81 return -ENOMEM;
82 }
83
84 hsotg->hr_backup = hr;
85 }
86 hr->hcfg = readl(hsotg->regs + HCFG); 76 hr->hcfg = readl(hsotg->regs + HCFG);
87 hr->haintmsk = readl(hsotg->regs + HAINTMSK); 77 hr->haintmsk = readl(hsotg->regs + HAINTMSK);
88 for (i = 0; i < hsotg->core_params->host_channels; ++i) 78 for (i = 0; i < hsotg->core_params->host_channels; ++i)
@@ -90,6 +80,7 @@ static int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg)
90 80
91 hr->hprt0 = readl(hsotg->regs + HPRT0); 81 hr->hprt0 = readl(hsotg->regs + HPRT0);
92 hr->hfir = readl(hsotg->regs + HFIR); 82 hr->hfir = readl(hsotg->regs + HFIR);
83 hr->valid = true;
93 84
94 return 0; 85 return 0;
95} 86}
@@ -109,12 +100,13 @@ static int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg)
109 dev_dbg(hsotg->dev, "%s\n", __func__); 100 dev_dbg(hsotg->dev, "%s\n", __func__);
110 101
111 /* Restore host regs */ 102 /* Restore host regs */
112 hr = hsotg->hr_backup; 103 hr = &hsotg->hr_backup;
113 if (!hr) { 104 if (!hr->valid) {
114 dev_err(hsotg->dev, "%s: no host registers to restore\n", 105 dev_err(hsotg->dev, "%s: no host registers to restore\n",
115 __func__); 106 __func__);
116 return -EINVAL; 107 return -EINVAL;
117 } 108 }
109 hr->valid = false;
118 110
119 writel(hr->hcfg, hsotg->regs + HCFG); 111 writel(hr->hcfg, hsotg->regs + HCFG);
120 writel(hr->haintmsk, hsotg->regs + HAINTMSK); 112 writel(hr->haintmsk, hsotg->regs + HAINTMSK);
@@ -152,17 +144,7 @@ static int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
152 dev_dbg(hsotg->dev, "%s\n", __func__); 144 dev_dbg(hsotg->dev, "%s\n", __func__);
153 145
154 /* Backup dev regs */ 146 /* Backup dev regs */
155 dr = hsotg->dr_backup; 147 dr = &hsotg->dr_backup;
156 if (!dr) {
157 dr = devm_kzalloc(hsotg->dev, sizeof(*dr), GFP_KERNEL);
158 if (!dr) {
159 dev_err(hsotg->dev, "%s: can't allocate device regs\n",
160 __func__);
161 return -ENOMEM;
162 }
163
164 hsotg->dr_backup = dr;
165 }
166 148
167 dr->dcfg = readl(hsotg->regs + DCFG); 149 dr->dcfg = readl(hsotg->regs + DCFG);
168 dr->dctl = readl(hsotg->regs + DCTL); 150 dr->dctl = readl(hsotg->regs + DCTL);
@@ -195,7 +177,7 @@ static int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
195 dr->doeptsiz[i] = readl(hsotg->regs + DOEPTSIZ(i)); 177 dr->doeptsiz[i] = readl(hsotg->regs + DOEPTSIZ(i));
196 dr->doepdma[i] = readl(hsotg->regs + DOEPDMA(i)); 178 dr->doepdma[i] = readl(hsotg->regs + DOEPDMA(i));
197 } 179 }
198 180 dr->valid = true;
199 return 0; 181 return 0;
200} 182}
201 183
@@ -215,12 +197,13 @@ static int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg)
215 dev_dbg(hsotg->dev, "%s\n", __func__); 197 dev_dbg(hsotg->dev, "%s\n", __func__);
216 198
217 /* Restore dev regs */ 199 /* Restore dev regs */
218 dr = hsotg->dr_backup; 200 dr = &hsotg->dr_backup;
219 if (!dr) { 201 if (!dr->valid) {
220 dev_err(hsotg->dev, "%s: no device registers to restore\n", 202 dev_err(hsotg->dev, "%s: no device registers to restore\n",
221 __func__); 203 __func__);
222 return -EINVAL; 204 return -EINVAL;
223 } 205 }
206 dr->valid = false;
224 207
225 writel(dr->dcfg, hsotg->regs + DCFG); 208 writel(dr->dcfg, hsotg->regs + DCFG);
226 writel(dr->dctl, hsotg->regs + DCTL); 209 writel(dr->dctl, hsotg->regs + DCTL);
@@ -268,17 +251,7 @@ static int dwc2_backup_global_registers(struct dwc2_hsotg *hsotg)
268 int i; 251 int i;
269 252
270 /* Backup global regs */ 253 /* Backup global regs */
271 gr = hsotg->gr_backup; 254 gr = &hsotg->gr_backup;
272 if (!gr) {
273 gr = devm_kzalloc(hsotg->dev, sizeof(*gr), GFP_KERNEL);
274 if (!gr) {
275 dev_err(hsotg->dev, "%s: can't allocate global regs\n",
276 __func__);
277 return -ENOMEM;
278 }
279
280 hsotg->gr_backup = gr;
281 }
282 255
283 gr->gotgctl = readl(hsotg->regs + GOTGCTL); 256 gr->gotgctl = readl(hsotg->regs + GOTGCTL);
284 gr->gintmsk = readl(hsotg->regs + GINTMSK); 257 gr->gintmsk = readl(hsotg->regs + GINTMSK);
@@ -291,6 +264,7 @@ static int dwc2_backup_global_registers(struct dwc2_hsotg *hsotg)
291 for (i = 0; i < MAX_EPS_CHANNELS; i++) 264 for (i = 0; i < MAX_EPS_CHANNELS; i++)
292 gr->dtxfsiz[i] = readl(hsotg->regs + DPTXFSIZN(i)); 265 gr->dtxfsiz[i] = readl(hsotg->regs + DPTXFSIZN(i));
293 266
267 gr->valid = true;
294 return 0; 268 return 0;
295} 269}
296 270
@@ -309,12 +283,13 @@ static int dwc2_restore_global_registers(struct dwc2_hsotg *hsotg)
309 dev_dbg(hsotg->dev, "%s\n", __func__); 283 dev_dbg(hsotg->dev, "%s\n", __func__);
310 284
311 /* Restore global regs */ 285 /* Restore global regs */
312 gr = hsotg->gr_backup; 286 gr = &hsotg->gr_backup;
313 if (!gr) { 287 if (!gr->valid) {
314 dev_err(hsotg->dev, "%s: no global registers to restore\n", 288 dev_err(hsotg->dev, "%s: no global registers to restore\n",
315 __func__); 289 __func__);
316 return -EINVAL; 290 return -EINVAL;
317 } 291 }
292 gr->valid = false;
318 293
319 writel(0xffffffff, hsotg->regs + GINTSTS); 294 writel(0xffffffff, hsotg->regs + GINTSTS);
320 writel(gr->gotgctl, hsotg->regs + GOTGCTL); 295 writel(gr->gotgctl, hsotg->regs + GOTGCTL);
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index 53b8de03f102..0ed87620941b 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -492,6 +492,7 @@ struct dwc2_gregs_backup {
492 u32 gdfifocfg; 492 u32 gdfifocfg;
493 u32 dtxfsiz[MAX_EPS_CHANNELS]; 493 u32 dtxfsiz[MAX_EPS_CHANNELS];
494 u32 gpwrdn; 494 u32 gpwrdn;
495 bool valid;
495}; 496};
496 497
497/** 498/**
@@ -521,6 +522,7 @@ struct dwc2_dregs_backup {
521 u32 doepctl[MAX_EPS_CHANNELS]; 522 u32 doepctl[MAX_EPS_CHANNELS];
522 u32 doeptsiz[MAX_EPS_CHANNELS]; 523 u32 doeptsiz[MAX_EPS_CHANNELS];
523 u32 doepdma[MAX_EPS_CHANNELS]; 524 u32 doepdma[MAX_EPS_CHANNELS];
525 bool valid;
524}; 526};
525 527
526/** 528/**
@@ -538,6 +540,7 @@ struct dwc2_hregs_backup {
538 u32 hcintmsk[MAX_EPS_CHANNELS]; 540 u32 hcintmsk[MAX_EPS_CHANNELS];
539 u32 hprt0; 541 u32 hprt0;
540 u32 hfir; 542 u32 hfir;
543 bool valid;
541}; 544};
542 545
543/** 546/**
@@ -705,9 +708,9 @@ struct dwc2_hsotg {
705 struct work_struct wf_otg; 708 struct work_struct wf_otg;
706 struct timer_list wkp_timer; 709 struct timer_list wkp_timer;
707 enum dwc2_lx_state lx_state; 710 enum dwc2_lx_state lx_state;
708 struct dwc2_gregs_backup *gr_backup; 711 struct dwc2_gregs_backup gr_backup;
709 struct dwc2_dregs_backup *dr_backup; 712 struct dwc2_dregs_backup dr_backup;
710 struct dwc2_hregs_backup *hr_backup; 713 struct dwc2_hregs_backup hr_backup;
711 714
712 struct dentry *debug_root; 715 struct dentry *debug_root;
713 struct debugfs_regset32 *regset; 716 struct debugfs_regset32 *regset;
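
Editor's note: the dwc2 hunks replace the lazily devm-allocated gr_backup/dr_backup/hr_backup pointers with structs embedded in struct dwc2_hsotg, using a valid flag to record whether a snapshot exists and clearing it once the snapshot has been restored. A minimal sketch of that pattern, with made-up register names:

    struct regs_backup {
        unsigned int cfg;           /* one stand-in register              */
        int valid;
    };

    struct controller {
        unsigned int cfg_reg;       /* live hardware register (stand-in)  */
        struct regs_backup backup;  /* embedded, never allocated          */
    };

    static void backup_regs(struct controller *c)
    {
        c->backup.cfg   = c->cfg_reg;
        c->backup.valid = 1;        /* hr->valid = true in the driver     */
    }

    static int restore_regs(struct controller *c)
    {
        if (!c->backup.valid)
            return -1;              /* -EINVAL: nothing saved yet         */
        c->backup.valid = 0;        /* snapshot is consumed               */
        c->cfg_reg = c->backup.cfg;
        return 0;
    }
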
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index b10377c65064..f845c41fe9e5 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -359,10 +359,9 @@ void dwc2_hcd_stop(struct dwc2_hsotg *hsotg)
359 359
360/* Caller must hold driver lock */ 360/* Caller must hold driver lock */
361static int dwc2_hcd_urb_enqueue(struct dwc2_hsotg *hsotg, 361static int dwc2_hcd_urb_enqueue(struct dwc2_hsotg *hsotg,
362 struct dwc2_hcd_urb *urb, void **ep_handle, 362 struct dwc2_hcd_urb *urb, struct dwc2_qh *qh,
363 gfp_t mem_flags) 363 struct dwc2_qtd *qtd)
364{ 364{
365 struct dwc2_qtd *qtd;
366 u32 intr_mask; 365 u32 intr_mask;
367 int retval; 366 int retval;
368 int dev_speed; 367 int dev_speed;
@@ -386,18 +385,15 @@ static int dwc2_hcd_urb_enqueue(struct dwc2_hsotg *hsotg,
386 return -ENODEV; 385 return -ENODEV;
387 } 386 }
388 387
389 qtd = kzalloc(sizeof(*qtd), mem_flags);
390 if (!qtd) 388 if (!qtd)
391 return -ENOMEM; 389 return -EINVAL;
392 390
393 dwc2_hcd_qtd_init(qtd, urb); 391 dwc2_hcd_qtd_init(qtd, urb);
394 retval = dwc2_hcd_qtd_add(hsotg, qtd, (struct dwc2_qh **)ep_handle, 392 retval = dwc2_hcd_qtd_add(hsotg, qtd, qh);
395 mem_flags);
396 if (retval) { 393 if (retval) {
397 dev_err(hsotg->dev, 394 dev_err(hsotg->dev,
398 "DWC OTG HCD URB Enqueue failed adding QTD. Error status %d\n", 395 "DWC OTG HCD URB Enqueue failed adding QTD. Error status %d\n",
399 retval); 396 retval);
400 kfree(qtd);
401 return retval; 397 return retval;
402 } 398 }
403 399
@@ -2445,6 +2441,9 @@ static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
2445 u32 tflags = 0; 2441 u32 tflags = 0;
2446 void *buf; 2442 void *buf;
2447 unsigned long flags; 2443 unsigned long flags;
2444 struct dwc2_qh *qh;
2445 bool qh_allocated = false;
2446 struct dwc2_qtd *qtd;
2448 2447
2449 if (dbg_urb(urb)) { 2448 if (dbg_urb(urb)) {
2450 dev_vdbg(hsotg->dev, "DWC OTG HCD URB Enqueue\n"); 2449 dev_vdbg(hsotg->dev, "DWC OTG HCD URB Enqueue\n");
@@ -2523,15 +2522,32 @@ static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
2523 urb->iso_frame_desc[i].length); 2522 urb->iso_frame_desc[i].length);
2524 2523
2525 urb->hcpriv = dwc2_urb; 2524 urb->hcpriv = dwc2_urb;
2525 qh = (struct dwc2_qh *) ep->hcpriv;
2526 /* Create QH for the endpoint if it doesn't exist */
2527 if (!qh) {
2528 qh = dwc2_hcd_qh_create(hsotg, dwc2_urb, mem_flags);
2529 if (!qh) {
2530 retval = -ENOMEM;
2531 goto fail0;
2532 }
2533 ep->hcpriv = qh;
2534 qh_allocated = true;
2535 }
2536
2537 qtd = kzalloc(sizeof(*qtd), mem_flags);
2538 if (!qtd) {
2539 retval = -ENOMEM;
2540 goto fail1;
2541 }
2526 2542
2527 spin_lock_irqsave(&hsotg->lock, flags); 2543 spin_lock_irqsave(&hsotg->lock, flags);
2528 retval = usb_hcd_link_urb_to_ep(hcd, urb); 2544 retval = usb_hcd_link_urb_to_ep(hcd, urb);
2529 if (retval) 2545 if (retval)
2530 goto fail1; 2546 goto fail2;
2531 2547
2532 retval = dwc2_hcd_urb_enqueue(hsotg, dwc2_urb, &ep->hcpriv, mem_flags); 2548 retval = dwc2_hcd_urb_enqueue(hsotg, dwc2_urb, qh, qtd);
2533 if (retval) 2549 if (retval)
2534 goto fail2; 2550 goto fail3;
2535 2551
2536 if (alloc_bandwidth) { 2552 if (alloc_bandwidth) {
2537 dwc2_allocate_bus_bandwidth(hcd, 2553 dwc2_allocate_bus_bandwidth(hcd,
@@ -2543,12 +2559,25 @@ static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
2543 2559
2544 return 0; 2560 return 0;
2545 2561
2546fail2: 2562fail3:
2547 dwc2_urb->priv = NULL; 2563 dwc2_urb->priv = NULL;
2548 usb_hcd_unlink_urb_from_ep(hcd, urb); 2564 usb_hcd_unlink_urb_from_ep(hcd, urb);
2549fail1: 2565fail2:
2550 spin_unlock_irqrestore(&hsotg->lock, flags); 2566 spin_unlock_irqrestore(&hsotg->lock, flags);
2551 urb->hcpriv = NULL; 2567 urb->hcpriv = NULL;
2568 kfree(qtd);
2569fail1:
2570 if (qh_allocated) {
2571 struct dwc2_qtd *qtd2, *qtd2_tmp;
2572
2573 ep->hcpriv = NULL;
2574 dwc2_hcd_qh_unlink(hsotg, qh);
2575 /* Free each QTD in the QH's QTD list */
2576 list_for_each_entry_safe(qtd2, qtd2_tmp, &qh->qtd_list,
2577 qtd_list_entry)
2578 dwc2_hcd_qtd_unlink_and_free(hsotg, qtd2, qh);
2579 dwc2_hcd_qh_free(hsotg, qh);
2580 }
2552fail0: 2581fail0:
2553 kfree(dwc2_urb); 2582 kfree(dwc2_urb);
2554 2583
diff --git a/drivers/usb/dwc2/hcd.h b/drivers/usb/dwc2/hcd.h
index 7b5841c40033..fc1054965552 100644
--- a/drivers/usb/dwc2/hcd.h
+++ b/drivers/usb/dwc2/hcd.h
@@ -463,6 +463,9 @@ extern void dwc2_hcd_queue_transactions(struct dwc2_hsotg *hsotg,
463/* Schedule Queue Functions */ 463/* Schedule Queue Functions */
464/* Implemented in hcd_queue.c */ 464/* Implemented in hcd_queue.c */
465extern void dwc2_hcd_init_usecs(struct dwc2_hsotg *hsotg); 465extern void dwc2_hcd_init_usecs(struct dwc2_hsotg *hsotg);
466extern struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg,
467 struct dwc2_hcd_urb *urb,
468 gfp_t mem_flags);
466extern void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh); 469extern void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh);
467extern int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh); 470extern int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh);
468extern void dwc2_hcd_qh_unlink(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh); 471extern void dwc2_hcd_qh_unlink(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh);
@@ -471,7 +474,7 @@ extern void dwc2_hcd_qh_deactivate(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
471 474
472extern void dwc2_hcd_qtd_init(struct dwc2_qtd *qtd, struct dwc2_hcd_urb *urb); 475extern void dwc2_hcd_qtd_init(struct dwc2_qtd *qtd, struct dwc2_hcd_urb *urb);
473extern int dwc2_hcd_qtd_add(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd, 476extern int dwc2_hcd_qtd_add(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
474 struct dwc2_qh **qh, gfp_t mem_flags); 477 struct dwc2_qh *qh);
475 478
476/* Unlinks and frees a QTD */ 479/* Unlinks and frees a QTD */
477static inline void dwc2_hcd_qtd_unlink_and_free(struct dwc2_hsotg *hsotg, 480static inline void dwc2_hcd_qtd_unlink_and_free(struct dwc2_hsotg *hsotg,
diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c
index 9b5c36256627..3ad63d392e13 100644
--- a/drivers/usb/dwc2/hcd_queue.c
+++ b/drivers/usb/dwc2/hcd_queue.c
@@ -191,7 +191,7 @@ static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
191 * 191 *
192 * Return: Pointer to the newly allocated QH, or NULL on error 192 * Return: Pointer to the newly allocated QH, or NULL on error
193 */ 193 */
194static struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg, 194struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg,
195 struct dwc2_hcd_urb *urb, 195 struct dwc2_hcd_urb *urb,
196 gfp_t mem_flags) 196 gfp_t mem_flags)
197{ 197{
@@ -767,57 +767,32 @@ void dwc2_hcd_qtd_init(struct dwc2_qtd *qtd, struct dwc2_hcd_urb *urb)
767 * 767 *
768 * @hsotg: The DWC HCD structure 768 * @hsotg: The DWC HCD structure
769 * @qtd: The QTD to add 769 * @qtd: The QTD to add
770 * @qh: Out parameter to return queue head 770 * @qh: Queue head to add qtd to
771 * @atomic_alloc: Flag to do atomic alloc if needed
772 * 771 *
773 * Return: 0 if successful, negative error code otherwise 772 * Return: 0 if successful, negative error code otherwise
774 * 773 *
775 * Finds the correct QH to place the QTD into. If it does not find a QH, it 774 * If the QH to which the QTD is added is not currently scheduled, it is placed
776 * will create a new QH. If the QH to which the QTD is added is not currently 775 * into the proper schedule based on its EP type.
777 * scheduled, it is placed into the proper schedule based on its EP type.
778 */ 776 */
779int dwc2_hcd_qtd_add(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd, 777int dwc2_hcd_qtd_add(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
780 struct dwc2_qh **qh, gfp_t mem_flags) 778 struct dwc2_qh *qh)
781{ 779{
782 struct dwc2_hcd_urb *urb = qtd->urb;
783 int allocated = 0;
784 int retval; 780 int retval;
785 781
786 /* 782 if (unlikely(!qh)) {
787 * Get the QH which holds the QTD-list to insert to. Create QH if it 783 dev_err(hsotg->dev, "%s: Invalid QH\n", __func__);
788 * doesn't exist. 784 retval = -EINVAL;
789 */ 785 goto fail;
790 if (*qh == NULL) {
791 *qh = dwc2_hcd_qh_create(hsotg, urb, mem_flags);
792 if (*qh == NULL)
793 return -ENOMEM;
794 allocated = 1;
795 } 786 }
796 787
797 retval = dwc2_hcd_qh_add(hsotg, *qh); 788 retval = dwc2_hcd_qh_add(hsotg, qh);
798 if (retval) 789 if (retval)
799 goto fail; 790 goto fail;
800 791
801 qtd->qh = *qh; 792 qtd->qh = qh;
802 list_add_tail(&qtd->qtd_list_entry, &(*qh)->qtd_list); 793 list_add_tail(&qtd->qtd_list_entry, &qh->qtd_list);
803 794
804 return 0; 795 return 0;
805
806fail: 796fail:
807 if (allocated) {
808 struct dwc2_qtd *qtd2, *qtd2_tmp;
809 struct dwc2_qh *qh_tmp = *qh;
810
811 *qh = NULL;
812 dwc2_hcd_qh_unlink(hsotg, qh_tmp);
813
814 /* Free each QTD in the QH's QTD list */
815 list_for_each_entry_safe(qtd2, qtd2_tmp, &qh_tmp->qtd_list,
816 qtd_list_entry)
817 dwc2_hcd_qtd_unlink_and_free(hsotg, qtd2, qh_tmp);
818
819 dwc2_hcd_qh_free(hsotg, qh_tmp);
820 }
821
822 return retval; 797 return retval;
823} 798}
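
Editor's note: the dwc2 HCD hunks move QH creation and QTD allocation out of dwc2_hcd_qtd_add() and into _dwc2_hcd_urb_enqueue(), before the spinlock is taken, and unwind through the fail0..fail3 labels in reverse order, tearing the QH down only if this call created it (qh_allocated). A compressed sketch of the allocate-before-lock, goto-unwind shape, with a pthread mutex standing in for the spinlock; on success the driver keeps qh/qtd queued, so the sketch frees them only to stay leak-free:

    #include <stdlib.h>
    #include <pthread.h>

    static pthread_mutex_t hsotg_lock = PTHREAD_MUTEX_INITIALIZER;

    static int enqueue(int link_fails)
    {
        int ret = 0;

        void *qh = malloc(64);           /* dwc2_hcd_qh_create() analogue   */
        if (!qh)
            return -1;

        void *qtd = malloc(32);          /* QTD allocated by the caller now */
        if (!qtd) {
            ret = -1;
            goto fail_qh;
        }

        pthread_mutex_lock(&hsotg_lock); /* no allocations from here on     */
        if (link_fails) {                /* usb_hcd_link_urb_to_ep() etc.   */
            ret = -1;
            goto fail_unlock;
        }
        /* ... dwc2_hcd_urb_enqueue(hsotg, urb, qh, qtd) equivalent ...     */
        pthread_mutex_unlock(&hsotg_lock);

        free(qtd);                       /* freed here only for the sketch  */
        free(qh);
        return 0;

    fail_unlock:
        pthread_mutex_unlock(&hsotg_lock);
        free(qtd);
    fail_qh:
        free(qh);                        /* only tear down what we built    */
        return ret;
    }
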
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 5c110d8e293b..ff5773c66b84 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -446,10 +446,12 @@ static int dwc3_phy_setup(struct dwc3 *dwc)
446 /* Select the HS PHY interface */ 446 /* Select the HS PHY interface */
447 switch (DWC3_GHWPARAMS3_HSPHY_IFC(dwc->hwparams.hwparams3)) { 447 switch (DWC3_GHWPARAMS3_HSPHY_IFC(dwc->hwparams.hwparams3)) {
448 case DWC3_GHWPARAMS3_HSPHY_IFC_UTMI_ULPI: 448 case DWC3_GHWPARAMS3_HSPHY_IFC_UTMI_ULPI:
449 if (!strncmp(dwc->hsphy_interface, "utmi", 4)) { 449 if (dwc->hsphy_interface &&
450 !strncmp(dwc->hsphy_interface, "utmi", 4)) {
450 reg &= ~DWC3_GUSB2PHYCFG_ULPI_UTMI; 451 reg &= ~DWC3_GUSB2PHYCFG_ULPI_UTMI;
451 break; 452 break;
452 } else if (!strncmp(dwc->hsphy_interface, "ulpi", 4)) { 453 } else if (dwc->hsphy_interface &&
454 !strncmp(dwc->hsphy_interface, "ulpi", 4)) {
453 reg |= DWC3_GUSB2PHYCFG_ULPI_UTMI; 455 reg |= DWC3_GUSB2PHYCFG_ULPI_UTMI;
454 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg); 456 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
455 } else { 457 } else {
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 2ef3c8d6a9db..69e769c35cf5 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -727,6 +727,10 @@ static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
727 dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_ISOCH_DELAY"); 727 dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_ISOCH_DELAY");
728 ret = dwc3_ep0_set_isoch_delay(dwc, ctrl); 728 ret = dwc3_ep0_set_isoch_delay(dwc, ctrl);
729 break; 729 break;
730 case USB_REQ_SET_INTERFACE:
731 dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_INTERFACE");
732 dwc->start_config_issued = false;
733 /* Fall through */
730 default: 734 default:
731 dwc3_trace(trace_dwc3_ep0, "Forwarding to gadget driver"); 735 dwc3_trace(trace_dwc3_ep0, "Forwarding to gadget driver");
732 ret = dwc3_ep0_delegate_req(dwc, ctrl); 736 ret = dwc3_ep0_delegate_req(dwc, ctrl);
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 4e3447bbd097..58b4657fc721 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -1758,10 +1758,13 @@ unknown:
1758 * take such requests too, if that's ever needed: to work 1758 * take such requests too, if that's ever needed: to work
1759 * in config 0, etc. 1759 * in config 0, etc.
1760 */ 1760 */
1761 list_for_each_entry(f, &cdev->config->functions, list) 1761 if (cdev->config) {
1762 if (f->req_match && f->req_match(f, ctrl)) 1762 list_for_each_entry(f, &cdev->config->functions, list)
1763 goto try_fun_setup; 1763 if (f->req_match && f->req_match(f, ctrl))
1764 f = NULL; 1764 goto try_fun_setup;
1765 f = NULL;
1766 }
1767
1765 switch (ctrl->bRequestType & USB_RECIP_MASK) { 1768 switch (ctrl->bRequestType & USB_RECIP_MASK) {
1766 case USB_RECIP_INTERFACE: 1769 case USB_RECIP_INTERFACE:
1767 if (!cdev->config || intf >= MAX_CONFIG_INTERFACES) 1770 if (!cdev->config || intf >= MAX_CONFIG_INTERFACES)
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 0495c94a23d7..289e20119fea 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -571,7 +571,7 @@ static struct config_group *function_make(
571 if (IS_ERR(fi)) 571 if (IS_ERR(fi))
572 return ERR_CAST(fi); 572 return ERR_CAST(fi);
573 573
574 ret = config_item_set_name(&fi->group.cg_item, name); 574 ret = config_item_set_name(&fi->group.cg_item, "%s", name);
575 if (ret) { 575 if (ret) {
576 usb_put_function_instance(fi); 576 usb_put_function_instance(fi);
577 return ERR_PTR(ret); 577 return ERR_PTR(ret);
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 45b8c8b338df..6e7be91e6097 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -924,7 +924,8 @@ static ssize_t ffs_epfile_write_iter(struct kiocb *kiocb, struct iov_iter *from)
924 924
925 kiocb->private = p; 925 kiocb->private = p;
926 926
927 kiocb_set_cancel_fn(kiocb, ffs_aio_cancel); 927 if (p->aio)
928 kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
928 929
929 res = ffs_epfile_io(kiocb->ki_filp, p); 930 res = ffs_epfile_io(kiocb->ki_filp, p);
930 if (res == -EIOCBQUEUED) 931 if (res == -EIOCBQUEUED)
@@ -968,7 +969,8 @@ static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to)
968 969
969 kiocb->private = p; 970 kiocb->private = p;
970 971
971 kiocb_set_cancel_fn(kiocb, ffs_aio_cancel); 972 if (p->aio)
973 kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
972 974
973 res = ffs_epfile_io(kiocb->ki_filp, p); 975 res = ffs_epfile_io(kiocb->ki_filp, p);
974 if (res == -EIOCBQUEUED) 976 if (res == -EIOCBQUEUED)
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index f7f35a36c09a..6df9715a4bcd 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -699,6 +699,10 @@ static inline int hidg_get_minor(void)
699 int ret; 699 int ret;
700 700
701 ret = ida_simple_get(&hidg_ida, 0, 0, GFP_KERNEL); 701 ret = ida_simple_get(&hidg_ida, 0, 0, GFP_KERNEL);
702 if (ret >= HIDG_MINORS) {
703 ida_simple_remove(&hidg_ida, ret);
704 ret = -ENODEV;
705 }
702 706
703 return ret; 707 return ret;
704} 708}
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index d2259c663996..f936268d26c6 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -2786,7 +2786,7 @@ int fsg_common_set_nluns(struct fsg_common *common, int nluns)
2786 return -EINVAL; 2786 return -EINVAL;
2787 } 2787 }
2788 2788
2789 curlun = kcalloc(nluns, sizeof(*curlun), GFP_KERNEL); 2789 curlun = kcalloc(FSG_MAX_LUNS, sizeof(*curlun), GFP_KERNEL);
2790 if (unlikely(!curlun)) 2790 if (unlikely(!curlun))
2791 return -ENOMEM; 2791 return -ENOMEM;
2792 2792
@@ -2796,8 +2796,6 @@ int fsg_common_set_nluns(struct fsg_common *common, int nluns)
2796 common->luns = curlun; 2796 common->luns = curlun;
2797 common->nluns = nluns; 2797 common->nluns = nluns;
2798 2798
2799 pr_info("Number of LUNs=%d\n", common->nluns);
2800
2801 return 0; 2799 return 0;
2802} 2800}
2803EXPORT_SYMBOL_GPL(fsg_common_set_nluns); 2801EXPORT_SYMBOL_GPL(fsg_common_set_nluns);
@@ -3563,14 +3561,26 @@ static struct usb_function *fsg_alloc(struct usb_function_instance *fi)
3563 struct fsg_opts *opts = fsg_opts_from_func_inst(fi); 3561 struct fsg_opts *opts = fsg_opts_from_func_inst(fi);
3564 struct fsg_common *common = opts->common; 3562 struct fsg_common *common = opts->common;
3565 struct fsg_dev *fsg; 3563 struct fsg_dev *fsg;
3564 unsigned nluns, i;
3566 3565
3567 fsg = kzalloc(sizeof(*fsg), GFP_KERNEL); 3566 fsg = kzalloc(sizeof(*fsg), GFP_KERNEL);
3568 if (unlikely(!fsg)) 3567 if (unlikely(!fsg))
3569 return ERR_PTR(-ENOMEM); 3568 return ERR_PTR(-ENOMEM);
3570 3569
3571 mutex_lock(&opts->lock); 3570 mutex_lock(&opts->lock);
3571 if (!opts->refcnt) {
3572 for (nluns = i = 0; i < FSG_MAX_LUNS; ++i)
3573 if (common->luns[i])
3574 nluns = i + 1;
3575 if (!nluns)
3576 pr_warn("No LUNS defined, continuing anyway\n");
3577 else
3578 common->nluns = nluns;
3579 pr_info("Number of LUNs=%u\n", common->nluns);
3580 }
3572 opts->refcnt++; 3581 opts->refcnt++;
3573 mutex_unlock(&opts->lock); 3582 mutex_unlock(&opts->lock);
3583
3574 fsg->function.name = FSG_DRIVER_DESC; 3584 fsg->function.name = FSG_DRIVER_DESC;
3575 fsg->function.bind = fsg_bind; 3585 fsg->function.bind = fsg_bind;
3576 fsg->function.unbind = fsg_unbind; 3586 fsg->function.unbind = fsg_unbind;
diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
index 6316aa5b1c49..ad50a67c1465 100644
--- a/drivers/usb/gadget/function/f_midi.c
+++ b/drivers/usb/gadget/function/f_midi.c
@@ -1145,7 +1145,7 @@ static struct usb_function *f_midi_alloc(struct usb_function_instance *fi)
1145 if (opts->id && !midi->id) { 1145 if (opts->id && !midi->id) {
1146 status = -ENOMEM; 1146 status = -ENOMEM;
1147 mutex_unlock(&opts->lock); 1147 mutex_unlock(&opts->lock);
1148 goto kstrdup_fail; 1148 goto setup_fail;
1149 } 1149 }
1150 midi->in_ports = opts->in_ports; 1150 midi->in_ports = opts->in_ports;
1151 midi->out_ports = opts->out_ports; 1151 midi->out_ports = opts->out_ports;
@@ -1164,8 +1164,6 @@ static struct usb_function *f_midi_alloc(struct usb_function_instance *fi)
1164 1164
1165 return &midi->func; 1165 return &midi->func;
1166 1166
1167kstrdup_fail:
1168 f_midi_unregister_card(midi);
1169setup_fail: 1167setup_fail:
1170 for (--i; i >= 0; i--) 1168 for (--i; i >= 0; i--)
1171 kfree(midi->in_port[i]); 1169 kfree(midi->in_port[i]);
diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
index 44173df27273..357f63f47b42 100644
--- a/drivers/usb/gadget/function/f_printer.c
+++ b/drivers/usb/gadget/function/f_printer.c
@@ -1248,7 +1248,15 @@ static struct config_item_type printer_func_type = {
1248 1248
1249static inline int gprinter_get_minor(void) 1249static inline int gprinter_get_minor(void)
1250{ 1250{
1251 return ida_simple_get(&printer_ida, 0, 0, GFP_KERNEL); 1251 int ret;
1252
1253 ret = ida_simple_get(&printer_ida, 0, 0, GFP_KERNEL);
1254 if (ret >= PRINTER_MINORS) {
1255 ida_simple_remove(&printer_ida, ret);
1256 ret = -ENODEV;
1257 }
1258
1259 return ret;
1252} 1260}
1253 1261
1254static inline void gprinter_put_minor(int minor) 1262static inline void gprinter_put_minor(int minor)
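
Editor's note: both f_hid and f_printer now reject ida_simple_get() results that fall outside the reserved minor range (HIDG_MINORS / PRINTER_MINORS), returning the id to the allocator and failing with -ENODEV; the ida is queried without an upper bound here, so the caller has to enforce the limit. A toy illustration of the bound check, with a plain counter standing in for the ida:

    #define MINORS 4                 /* illustrative limit                      */

    static int next_id;              /* stand-in for the ida's state            */

    static int get_minor(void)
    {
        int id = next_id++;          /* ida_simple_get(&ida, 0, 0, GFP_KERNEL)  */

        if (id >= MINORS) {
            next_id--;               /* ida_simple_remove(&ida, id)             */
            return -1;               /* -ENODEV in the functions above          */
        }
        return id;
    }
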
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index 6d3eb8b00a48..531861547253 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -1162,14 +1162,14 @@ afunc_set_alt(struct usb_function *fn, unsigned intf, unsigned alt)
1162 factor = 1000; 1162 factor = 1000;
1163 } else { 1163 } else {
1164 ep_desc = &hs_epin_desc; 1164 ep_desc = &hs_epin_desc;
1165 factor = 125; 1165 factor = 8000;
1166 } 1166 }
1167 1167
1168 /* pre-compute some values for iso_complete() */ 1168 /* pre-compute some values for iso_complete() */
1169 uac2->p_framesize = opts->p_ssize * 1169 uac2->p_framesize = opts->p_ssize *
1170 num_channels(opts->p_chmask); 1170 num_channels(opts->p_chmask);
1171 rate = opts->p_srate * uac2->p_framesize; 1171 rate = opts->p_srate * uac2->p_framesize;
1172 uac2->p_interval = (1 << (ep_desc->bInterval - 1)) * factor; 1172 uac2->p_interval = factor / (1 << (ep_desc->bInterval - 1));
1173 uac2->p_pktsize = min_t(unsigned int, rate / uac2->p_interval, 1173 uac2->p_pktsize = min_t(unsigned int, rate / uac2->p_interval,
1174 prm->max_psize); 1174 prm->max_psize);
1175 1175
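
Editor's note: the f_uac2 change flips p_interval from a period to a rate. Instead of multiplying the 2^(bInterval-1) service interval by a per-(micro)frame duration, it divides the number of (micro)frames per second (8000 for high speed, 1000 for full speed) by that interval, so the later rate / p_interval division yields bytes per packet in consistent units. A quick worked check; the sample parameters are illustrative, not the driver defaults:

    #include <stdio.h>

    int main(void)
    {
        unsigned int bInterval = 4;             /* HS isochronous: 2^3 = 8 uframes */
        unsigned int factor    = 8000;          /* HS microframes per second       */
        unsigned int rate      = 2 * 2 * 48000; /* bytes/s: 16-bit stereo, 48 kHz  */

        unsigned int p_interval = factor / (1u << (bInterval - 1)); /* 1000 pkt/s  */
        unsigned int p_pktsize  = rate / p_interval;                /* 192 bytes   */

        printf("%u packets/s, %u bytes per packet\n", p_interval, p_pktsize);
        return 0;
    }
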
diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.c b/drivers/usb/gadget/udc/bdc/bdc_ep.c
index b04980cf6dc4..1efa61265d8d 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_ep.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_ep.c
@@ -779,7 +779,7 @@ static int ep_dequeue(struct bdc_ep *ep, struct bdc_req *req)
779 /* The current hw dequeue pointer */ 779 /* The current hw dequeue pointer */
780 tmp_32 = bdc_readl(bdc->regs, BDC_EPSTS0(0)); 780 tmp_32 = bdc_readl(bdc->regs, BDC_EPSTS0(0));
781 deq_ptr_64 = tmp_32; 781 deq_ptr_64 = tmp_32;
782 tmp_32 = bdc_readl(bdc->regs, BDC_EPSTS0(1)); 782 tmp_32 = bdc_readl(bdc->regs, BDC_EPSTS1(0));
783 deq_ptr_64 |= ((u64)tmp_32 << 32); 783 deq_ptr_64 |= ((u64)tmp_32 << 32);
784 784
785 /* we have the dma addr of next bd that will be fetched by hardware */ 785 /* we have the dma addr of next bd that will be fetched by hardware */
diff --git a/drivers/usb/gadget/udc/fotg210-udc.c b/drivers/usb/gadget/udc/fotg210-udc.c
index e547ea7f56b1..1137e3384218 100644
--- a/drivers/usb/gadget/udc/fotg210-udc.c
+++ b/drivers/usb/gadget/udc/fotg210-udc.c
@@ -1171,7 +1171,7 @@ static int fotg210_udc_probe(struct platform_device *pdev)
1171 udc_name, fotg210); 1171 udc_name, fotg210);
1172 if (ret < 0) { 1172 if (ret < 0) {
1173 pr_err("request_irq error (%d)\n", ret); 1173 pr_err("request_irq error (%d)\n", ret);
1174 goto err_irq; 1174 goto err_req;
1175 } 1175 }
1176 1176
1177 ret = usb_add_gadget_udc(&pdev->dev, &fotg210->gadget); 1177 ret = usb_add_gadget_udc(&pdev->dev, &fotg210->gadget);
@@ -1183,7 +1183,6 @@ static int fotg210_udc_probe(struct platform_device *pdev)
1183 return 0; 1183 return 0;
1184 1184
1185err_add_udc: 1185err_add_udc:
1186err_irq:
1187 free_irq(ires->start, fotg210); 1186 free_irq(ires->start, fotg210);
1188 1187
1189err_req: 1188err_req:
diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c
index d32160d6463f..5da37c957b53 100644
--- a/drivers/usb/gadget/udc/mv_udc_core.c
+++ b/drivers/usb/gadget/udc/mv_udc_core.c
@@ -2167,7 +2167,7 @@ static int mv_udc_probe(struct platform_device *pdev)
2167 return -ENODEV; 2167 return -ENODEV;
2168 } 2168 }
2169 2169
2170 udc->phy_regs = ioremap(r->start, resource_size(r)); 2170 udc->phy_regs = devm_ioremap(&pdev->dev, r->start, resource_size(r));
2171 if (udc->phy_regs == NULL) { 2171 if (udc->phy_regs == NULL) {
2172 dev_err(&pdev->dev, "failed to map phy I/O memory\n"); 2172 dev_err(&pdev->dev, "failed to map phy I/O memory\n");
2173 return -EBUSY; 2173 return -EBUSY;
diff --git a/drivers/usb/gadget/udc/udc-core.c b/drivers/usb/gadget/udc/udc-core.c
index d69c35558f68..89ed5e71a199 100644
--- a/drivers/usb/gadget/udc/udc-core.c
+++ b/drivers/usb/gadget/udc/udc-core.c
@@ -60,13 +60,15 @@ static DEFINE_MUTEX(udc_lock);
60int usb_gadget_map_request(struct usb_gadget *gadget, 60int usb_gadget_map_request(struct usb_gadget *gadget,
61 struct usb_request *req, int is_in) 61 struct usb_request *req, int is_in)
62{ 62{
63 struct device *dev = gadget->dev.parent;
64
63 if (req->length == 0) 65 if (req->length == 0)
64 return 0; 66 return 0;
65 67
66 if (req->num_sgs) { 68 if (req->num_sgs) {
67 int mapped; 69 int mapped;
68 70
69 mapped = dma_map_sg(&gadget->dev, req->sg, req->num_sgs, 71 mapped = dma_map_sg(dev, req->sg, req->num_sgs,
70 is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 72 is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
71 if (mapped == 0) { 73 if (mapped == 0) {
72 dev_err(&gadget->dev, "failed to map SGs\n"); 74 dev_err(&gadget->dev, "failed to map SGs\n");
@@ -75,11 +77,11 @@ int usb_gadget_map_request(struct usb_gadget *gadget,
75 77
76 req->num_mapped_sgs = mapped; 78 req->num_mapped_sgs = mapped;
77 } else { 79 } else {
78 req->dma = dma_map_single(&gadget->dev, req->buf, req->length, 80 req->dma = dma_map_single(dev, req->buf, req->length,
79 is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 81 is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
80 82
81 if (dma_mapping_error(&gadget->dev, req->dma)) { 83 if (dma_mapping_error(dev, req->dma)) {
82 dev_err(&gadget->dev, "failed to map buffer\n"); 84 dev_err(dev, "failed to map buffer\n");
83 return -EFAULT; 85 return -EFAULT;
84 } 86 }
85 } 87 }
@@ -95,12 +97,12 @@ void usb_gadget_unmap_request(struct usb_gadget *gadget,
95 return; 97 return;
96 98
97 if (req->num_mapped_sgs) { 99 if (req->num_mapped_sgs) {
98 dma_unmap_sg(&gadget->dev, req->sg, req->num_mapped_sgs, 100 dma_unmap_sg(gadget->dev.parent, req->sg, req->num_mapped_sgs,
99 is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 101 is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
100 102
101 req->num_mapped_sgs = 0; 103 req->num_mapped_sgs = 0;
102 } else { 104 } else {
103 dma_unmap_single(&gadget->dev, req->dma, req->length, 105 dma_unmap_single(gadget->dev.parent, req->dma, req->length,
104 is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 106 is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
105 } 107 }
106} 108}
@@ -321,6 +323,7 @@ err4:
321 323
322err3: 324err3:
323 put_device(&udc->dev); 325 put_device(&udc->dev);
326 device_del(&gadget->dev);
324 327
325err2: 328err2:
326 put_device(&gadget->dev); 329 put_device(&gadget->dev);
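
Editor's note: the udc-core hunks map and unmap request buffers against gadget->dev.parent, presumably the UDC controller device that actually carries the DMA configuration, rather than the gadget's own logical struct device. A tiny sketch of the device choice, with stand-in types:

    struct dev_sketch    { struct dev_sketch *parent; };
    struct gadget_sketch { struct dev_sketch dev; };

    /* Hand the DMA API the hardware (parent) device, not the gadget's
     * logical child device. */
    static struct dev_sketch *dma_dev(struct gadget_sketch *gadget)
    {
        return gadget->dev.parent;   /* was &gadget->dev before the change */
    }
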
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
index f7d561ed3c23..d029bbe9eb36 100644
--- a/drivers/usb/host/ohci-q.c
+++ b/drivers/usb/host/ohci-q.c
@@ -981,10 +981,6 @@ rescan_all:
981 int completed, modified; 981 int completed, modified;
982 __hc32 *prev; 982 __hc32 *prev;
983 983
984 /* Is this ED already invisible to the hardware? */
985 if (ed->state == ED_IDLE)
986 goto ed_idle;
987
988 /* only take off EDs that the HC isn't using, accounting for 984 /* only take off EDs that the HC isn't using, accounting for
989 * frame counter wraps and EDs with partially retired TDs 985 * frame counter wraps and EDs with partially retired TDs
990 */ 986 */
@@ -1012,12 +1008,10 @@ skip_ed:
1012 } 1008 }
1013 1009
1014 /* ED's now officially unlinked, hc doesn't see */ 1010 /* ED's now officially unlinked, hc doesn't see */
1015 ed->state = ED_IDLE;
1016 ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_H); 1011 ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_H);
1017 ed->hwNextED = 0; 1012 ed->hwNextED = 0;
1018 wmb(); 1013 wmb();
1019 ed->hwINFO &= ~cpu_to_hc32(ohci, ED_SKIP | ED_DEQUEUE); 1014 ed->hwINFO &= ~cpu_to_hc32(ohci, ED_SKIP | ED_DEQUEUE);
1020ed_idle:
1021 1015
1022 /* reentrancy: if we drop the schedule lock, someone might 1016 /* reentrancy: if we drop the schedule lock, someone might
1023 * have modified this list. normally it's just prepending 1017 * have modified this list. normally it's just prepending
@@ -1088,6 +1082,7 @@ rescan_this:
1088 if (list_empty(&ed->td_list)) { 1082 if (list_empty(&ed->td_list)) {
1089 *last = ed->ed_next; 1083 *last = ed->ed_next;
1090 ed->ed_next = NULL; 1084 ed->ed_next = NULL;
1085 ed->state = ED_IDLE;
1091 list_del(&ed->in_use_list); 1086 list_del(&ed->in_use_list);
1092 } else if (ohci->rh_state == OHCI_RH_RUNNING) { 1087 } else if (ohci->rh_state == OHCI_RH_RUNNING) {
1093 *last = ed->ed_next; 1088 *last = ed->ed_next;
diff --git a/drivers/usb/host/ohci-tmio.c b/drivers/usb/host/ohci-tmio.c
index e9a6eec39142..cfcfadfc94fc 100644
--- a/drivers/usb/host/ohci-tmio.c
+++ b/drivers/usb/host/ohci-tmio.c
@@ -58,7 +58,7 @@
58#define CCR_PM_CKRNEN 0x0002 58#define CCR_PM_CKRNEN 0x0002
59#define CCR_PM_USBPW1 0x0004 59#define CCR_PM_USBPW1 0x0004
60#define CCR_PM_USBPW2 0x0008 60#define CCR_PM_USBPW2 0x0008
61#define CCR_PM_USBPW3 0x0008 61#define CCR_PM_USBPW3 0x0010
62#define CCR_PM_PMEE 0x0100 62#define CCR_PM_PMEE 0x0100
63#define CCR_PM_PMES 0x8000 63#define CCR_PM_PMES 0x8000
64 64
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index e75c565feb53..78241b5550df 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -484,10 +484,13 @@ static void xhci_hub_report_usb3_link_state(struct xhci_hcd *xhci,
484 u32 pls = status_reg & PORT_PLS_MASK; 484 u32 pls = status_reg & PORT_PLS_MASK;
485 485
486 /* resume state is a xHCI internal state. 486 /* resume state is a xHCI internal state.
487 * Do not report it to usb core. 487 * Do not report it to usb core, instead, pretend to be U3,
488 * thus usb core knows it's not ready for transfer
488 */ 489 */
489 if (pls == XDEV_RESUME) 490 if (pls == XDEV_RESUME) {
491 *status |= USB_SS_PORT_LS_U3;
490 return; 492 return;
493 }
491 494
492 /* When the CAS bit is set then warm reset 495 /* When the CAS bit is set then warm reset
493 * should be performed on port 496 * should be performed on port
@@ -588,7 +591,14 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
588 status |= USB_PORT_STAT_C_RESET << 16; 591 status |= USB_PORT_STAT_C_RESET << 16;
589 /* USB3.0 only */ 592 /* USB3.0 only */
590 if (hcd->speed == HCD_USB3) { 593 if (hcd->speed == HCD_USB3) {
591 if ((raw_port_status & PORT_PLC)) 594 /* Port link change with port in resume state should not be
595 * reported to usbcore, as this is an internal state to be
596 * handled by xhci driver. Reporting PLC to usbcore may
597 * cause usbcore clearing PLC first and port change event
598 * irq won't be generated.
599 */
600 if ((raw_port_status & PORT_PLC) &&
601 (raw_port_status & PORT_PLS_MASK) != XDEV_RESUME)
592 status |= USB_PORT_STAT_C_LINK_STATE << 16; 602 status |= USB_PORT_STAT_C_LINK_STATE << 16;
593 if ((raw_port_status & PORT_WRC)) 603 if ((raw_port_status & PORT_WRC))
594 status |= USB_PORT_STAT_C_BH_RESET << 16; 604 status |= USB_PORT_STAT_C_BH_RESET << 16;
@@ -1120,10 +1130,10 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
1120 spin_lock_irqsave(&xhci->lock, flags); 1130 spin_lock_irqsave(&xhci->lock, flags);
1121 1131
1122 if (hcd->self.root_hub->do_remote_wakeup) { 1132 if (hcd->self.root_hub->do_remote_wakeup) {
1123 if (bus_state->resuming_ports) { 1133 if (bus_state->resuming_ports || /* USB2 */
1134 bus_state->port_remote_wakeup) { /* USB3 */
1124 spin_unlock_irqrestore(&xhci->lock, flags); 1135 spin_unlock_irqrestore(&xhci->lock, flags);
1125 xhci_dbg(xhci, "suspend failed because " 1136 xhci_dbg(xhci, "suspend failed because a port is resuming\n");
1126 "a port is resuming\n");
1127 return -EBUSY; 1137 return -EBUSY;
1128 } 1138 }
1129 } 1139 }
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index f8336408ef07..9a8c936cd42c 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1427,10 +1427,10 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
1427 /* Attempt to use the ring cache */ 1427 /* Attempt to use the ring cache */
1428 if (virt_dev->num_rings_cached == 0) 1428 if (virt_dev->num_rings_cached == 0)
1429 return -ENOMEM; 1429 return -ENOMEM;
1430 virt_dev->num_rings_cached--;
1430 virt_dev->eps[ep_index].new_ring = 1431 virt_dev->eps[ep_index].new_ring =
1431 virt_dev->ring_cache[virt_dev->num_rings_cached]; 1432 virt_dev->ring_cache[virt_dev->num_rings_cached];
1432 virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL; 1433 virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
1433 virt_dev->num_rings_cached--;
1434 xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring, 1434 xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
1435 1, type); 1435 1, type);
1436 } 1436 }
@@ -1792,7 +1792,8 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
1792 int size; 1792 int size;
1793 int i, j, num_ports; 1793 int i, j, num_ports;
1794 1794
1795 del_timer_sync(&xhci->cmd_timer); 1795 if (timer_pending(&xhci->cmd_timer))
1796 del_timer_sync(&xhci->cmd_timer);
1796 1797
1797 /* Free the Event Ring Segment Table and the actual Event Ring */ 1798 /* Free the Event Ring Segment Table and the actual Event Ring */
1798 size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries); 1799 size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
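
The ring-cache hunk above is an off-by-one fix: num_rings_cached counts entries, so ring_cache[num_rings_cached] is one slot past the last cached ring and may well be NULL. Decrementing the count first lets it double as the index of the last valid entry. A stand-alone illustration of the pattern (names are purely illustrative):

    #include <stddef.h>

    struct ring;

    /* Pop the most recently cached ring, or NULL if the cache is empty. */
    static struct ring *cache_pop(struct ring **cache, unsigned int *count)
    {
            struct ring *r;

            if (*count == 0)
                    return NULL;

            (*count)--;             /* count now indexes the last valid slot */
            r = cache[*count];
            cache[*count] = NULL;
            return r;
    }

The second hunk guards del_timer_sync() with timer_pending(), presumably so cleanup paths that run before the command timer was ever armed do not touch it.
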
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 4a4cb1d91ac8..5590eac2b22d 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -23,10 +23,15 @@
23#include <linux/pci.h> 23#include <linux/pci.h>
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/acpi.h>
26 27
27#include "xhci.h" 28#include "xhci.h"
28#include "xhci-trace.h" 29#include "xhci-trace.h"
29 30
31#define PORT2_SSIC_CONFIG_REG2 0x883c
32#define PROG_DONE (1 << 30)
33#define SSIC_PORT_UNUSED (1 << 31)
34
30/* Device for a quirk */ 35/* Device for a quirk */
31#define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73 36#define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73
32#define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000 37#define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000
@@ -176,20 +181,63 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
176} 181}
177 182
178/* 183/*
184 * In some Intel xHCI controllers, in order to get D3 working,
185 * through a vendor specific SSIC CONFIG register at offset 0x883c,
186 * SSIC PORT need to be marked as "unused" before putting xHCI
187 * into D3. After D3 exit, the SSIC port need to be marked as "used".
188 * Without this change, xHCI might not enter D3 state.
179 * Make sure PME works on some Intel xHCI controllers by writing 1 to clear 189 * Make sure PME works on some Intel xHCI controllers by writing 1 to clear
180 * the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4 190 * the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4
181 */ 191 */
182static void xhci_pme_quirk(struct xhci_hcd *xhci) 192static void xhci_pme_quirk(struct usb_hcd *hcd, bool suspend)
183{ 193{
194 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
195 struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
184 u32 val; 196 u32 val;
185 void __iomem *reg; 197 void __iomem *reg;
186 198
199 if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
200 pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) {
201
202 reg = (void __iomem *) xhci->cap_regs + PORT2_SSIC_CONFIG_REG2;
203
204 /* Notify SSIC that SSIC profile programming is not done */
205 val = readl(reg) & ~PROG_DONE;
206 writel(val, reg);
207
208 /* Mark SSIC port as unused(suspend) or used(resume) */
209 val = readl(reg);
210 if (suspend)
211 val |= SSIC_PORT_UNUSED;
212 else
213 val &= ~SSIC_PORT_UNUSED;
214 writel(val, reg);
215
216 /* Notify SSIC that SSIC profile programming is done */
217 val = readl(reg) | PROG_DONE;
218 writel(val, reg);
219 readl(reg);
220 }
221
187 reg = (void __iomem *) xhci->cap_regs + 0x80a4; 222 reg = (void __iomem *) xhci->cap_regs + 0x80a4;
188 val = readl(reg); 223 val = readl(reg);
189 writel(val | BIT(28), reg); 224 writel(val | BIT(28), reg);
190 readl(reg); 225 readl(reg);
191} 226}
192 227
228#ifdef CONFIG_ACPI
229static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev)
230{
231 static const u8 intel_dsm_uuid[] = {
232 0xb7, 0x0c, 0x34, 0xac, 0x01, 0xe9, 0xbf, 0x45,
233 0xb7, 0xe6, 0x2b, 0x34, 0xec, 0x93, 0x1e, 0x23,
234 };
235 acpi_evaluate_dsm(ACPI_HANDLE(&dev->dev), intel_dsm_uuid, 3, 1, NULL);
236}
237#else
238 static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev) { }
239#endif /* CONFIG_ACPI */
240
193/* called during probe() after chip reset completes */ 241/* called during probe() after chip reset completes */
194static int xhci_pci_setup(struct usb_hcd *hcd) 242static int xhci_pci_setup(struct usb_hcd *hcd)
195{ 243{
@@ -263,6 +311,9 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
263 HCC_MAX_PSA(xhci->hcc_params) >= 4) 311 HCC_MAX_PSA(xhci->hcc_params) >= 4)
264 xhci->shared_hcd->can_do_streams = 1; 312 xhci->shared_hcd->can_do_streams = 1;
265 313
314 if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
315 xhci_pme_acpi_rtd3_enable(dev);
316
266 /* USB-2 and USB-3 roothubs initialized, allow runtime pm suspend */ 317 /* USB-2 and USB-3 roothubs initialized, allow runtime pm suspend */
267 pm_runtime_put_noidle(&dev->dev); 318 pm_runtime_put_noidle(&dev->dev);
268 319
@@ -307,7 +358,7 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
307 pdev->no_d3cold = true; 358 pdev->no_d3cold = true;
308 359
309 if (xhci->quirks & XHCI_PME_STUCK_QUIRK) 360 if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
310 xhci_pme_quirk(xhci); 361 xhci_pme_quirk(hcd, true);
311 362
312 return xhci_suspend(xhci, do_wakeup); 363 return xhci_suspend(xhci, do_wakeup);
313} 364}
@@ -340,7 +391,7 @@ static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
340 usb_enable_intel_xhci_ports(pdev); 391 usb_enable_intel_xhci_ports(pdev);
341 392
342 if (xhci->quirks & XHCI_PME_STUCK_QUIRK) 393 if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
343 xhci_pme_quirk(xhci); 394 xhci_pme_quirk(hcd, false);
344 395
345 retval = xhci_resume(xhci, hibernated); 396 retval = xhci_resume(xhci, hibernated);
346 return retval; 397 return retval;
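
The xhci-pci.c changes make the PME-stuck quirk direction-aware: on the Intel Cherryview controller the SSIC port is flagged unused before entering D3 and flagged used again on resume, with the update bracketed by clearing and setting PROG_DONE and flushed by a trailing readl(). Roughly, using the constants added at the top of the file (the helper name is invented):

    #include <linux/io.h>
    #include <linux/types.h>

    #define PROG_DONE               (1 << 30)
    #define SSIC_PORT_UNUSED        (1 << 31)

    /* Flip the "port unused" flag under the PROG_DONE handshake. */
    static void sketch_ssic_mark(void __iomem *reg, bool unused)
    {
            u32 val;

            val = readl(reg) & ~PROG_DONE;  /* programming in progress */
            writel(val, reg);

            val = readl(reg);
            if (unused)
                    val |= SSIC_PORT_UNUSED;
            else
                    val &= ~SSIC_PORT_UNUSED;
            writel(val, reg);

            val = readl(reg) | PROG_DONE;   /* programming complete */
            writel(val, reg);
            readl(reg);                     /* flush the posted writes */
    }

The new xhci_pme_acpi_rtd3_enable() evaluates an Intel-specific ACPI _DSM (the UUID bytes are the raw buffer layout acpi_evaluate_dsm() expects), which appears to let firmware prepare the controller for runtime D3; it is only attempted when the XHCI_PME_STUCK_QUIRK flag is already set.
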
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 94416ff70810..32f4d564494a 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -82,7 +82,7 @@ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
82 return 0; 82 return 0;
83 /* offset in TRBs */ 83 /* offset in TRBs */
84 segment_offset = trb - seg->trbs; 84 segment_offset = trb - seg->trbs;
85 if (segment_offset > TRBS_PER_SEGMENT) 85 if (segment_offset >= TRBS_PER_SEGMENT)
86 return 0; 86 return 0;
87 return seg->dma + (segment_offset * sizeof(*trb)); 87 return seg->dma + (segment_offset * sizeof(*trb));
88} 88}
@@ -1546,6 +1546,9 @@ static void handle_port_status(struct xhci_hcd *xhci,
1546 usb_hcd_resume_root_hub(hcd); 1546 usb_hcd_resume_root_hub(hcd);
1547 } 1547 }
1548 1548
1549 if (hcd->speed == HCD_USB3 && (temp & PORT_PLS_MASK) == XDEV_INACTIVE)
1550 bus_state->port_remote_wakeup &= ~(1 << faked_port_index);
1551
1549 if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) { 1552 if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) {
1550 xhci_dbg(xhci, "port resume event for port %d\n", port_id); 1553 xhci_dbg(xhci, "port resume event for port %d\n", port_id);
1551 1554
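
Two small correctness fixes in xhci-ring.c: xhci_trb_virt_to_dma() must reject segment_offset == TRBS_PER_SEGMENT, since valid indices run from 0 to TRBS_PER_SEGMENT - 1, and a port that drops to the new XDEV_INACTIVE link state now clears its remote-wakeup bookkeeping bit. The bounds check in isolation, as a self-contained sketch (types and the segment size are illustrative):

    #include <stddef.h>
    #include <stdint.h>

    #define NTRBS   64      /* stand-in for TRBS_PER_SEGMENT */

    struct trb { uint32_t field[4]; };

    struct segment {
            struct trb      trbs[NTRBS];
            uint64_t        dma;            /* DMA address of trbs[0] */
    };

    /* Translate a TRB pointer within a segment to its DMA address. */
    static uint64_t trb_to_dma(const struct segment *seg, const struct trb *trb)
    {
            size_t off = trb - seg->trbs;   /* caller ensures trb >= seg->trbs */

            if (off >= NTRBS)               /* '>' would accept one-past-the-end */
                    return 0;
            return seg->dma + off * sizeof(*trb);
    }
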
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 7da0d6043d33..526ebc0c7e72 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -3453,6 +3453,9 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
3453 return -EINVAL; 3453 return -EINVAL;
3454 } 3454 }
3455 3455
3456 if (virt_dev->tt_info)
3457 old_active_eps = virt_dev->tt_info->active_eps;
3458
3456 if (virt_dev->udev != udev) { 3459 if (virt_dev->udev != udev) {
3457 /* If the virt_dev and the udev does not match, this virt_dev 3460 /* If the virt_dev and the udev does not match, this virt_dev
3458 * may belong to another udev. 3461 * may belong to another udev.
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 31e46cc55807..ed2ebf647c38 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -285,6 +285,7 @@ struct xhci_op_regs {
285#define XDEV_U0 (0x0 << 5) 285#define XDEV_U0 (0x0 << 5)
286#define XDEV_U2 (0x2 << 5) 286#define XDEV_U2 (0x2 << 5)
287#define XDEV_U3 (0x3 << 5) 287#define XDEV_U3 (0x3 << 5)
288#define XDEV_INACTIVE (0x6 << 5)
288#define XDEV_RESUME (0xf << 5) 289#define XDEV_RESUME (0xf << 5)
289/* true: port has power (see HCC_PPC) */ 290/* true: port has power (see HCC_PPC) */
290#define PORT_POWER (1 << 9) 291#define PORT_POWER (1 << 9)
diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
index 30842bc195f5..92d5f718659b 100644
--- a/drivers/usb/musb/musb_virthub.c
+++ b/drivers/usb/musb/musb_virthub.c
@@ -275,9 +275,7 @@ static int musb_has_gadget(struct musb *musb)
275#ifdef CONFIG_USB_MUSB_HOST 275#ifdef CONFIG_USB_MUSB_HOST
276 return 1; 276 return 1;
277#else 277#else
278 if (musb->port_mode == MUSB_PORT_MODE_HOST) 278 return musb->port_mode == MUSB_PORT_MODE_HOST;
279 return 1;
280 return musb->g.dev.driver != NULL;
281#endif 279#endif
282} 280}
283 281
diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c
index 8f7cb068d29b..3fcc0483a081 100644
--- a/drivers/usb/phy/phy-mxs-usb.c
+++ b/drivers/usb/phy/phy-mxs-usb.c
@@ -217,6 +217,9 @@ static bool mxs_phy_get_vbus_status(struct mxs_phy *mxs_phy)
217{ 217{
218 unsigned int vbus_value; 218 unsigned int vbus_value;
219 219
220 if (!mxs_phy->regmap_anatop)
221 return false;
222
220 if (mxs_phy->port_id == 0) 223 if (mxs_phy->port_id == 0)
221 regmap_read(mxs_phy->regmap_anatop, 224 regmap_read(mxs_phy->regmap_anatop,
222 ANADIG_USB1_VBUS_DET_STAT, 225 ANADIG_USB1_VBUS_DET_STAT,
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index ffd739e31bfc..eac7ccaa3c85 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -187,6 +187,7 @@ static const struct usb_device_id id_table[] = {
187 { USB_DEVICE(0x1FB9, 0x0602) }, /* Lake Shore Model 648 Magnet Power Supply */ 187 { USB_DEVICE(0x1FB9, 0x0602) }, /* Lake Shore Model 648 Magnet Power Supply */
188 { USB_DEVICE(0x1FB9, 0x0700) }, /* Lake Shore Model 737 VSM Controller */ 188 { USB_DEVICE(0x1FB9, 0x0700) }, /* Lake Shore Model 737 VSM Controller */
189 { USB_DEVICE(0x1FB9, 0x0701) }, /* Lake Shore Model 776 Hall Matrix */ 189 { USB_DEVICE(0x1FB9, 0x0701) }, /* Lake Shore Model 776 Hall Matrix */
190 { USB_DEVICE(0x2626, 0xEA60) }, /* Aruba Networks 7xxx USB Serial Console */
190 { USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */ 191 { USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */
191 { USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */ 192 { USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */
192 { USB_DEVICE(0x3195, 0xF281) }, /* Link Instruments MSO-28 */ 193 { USB_DEVICE(0x3195, 0xF281) }, /* Link Instruments MSO-28 */
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 4f70df33975a..78b4f64c6b00 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -121,26 +121,26 @@ static DEFINE_SPINLOCK(release_lock);
121static const unsigned int dummy; /* for clarity in register access fns */ 121static const unsigned int dummy; /* for clarity in register access fns */
122 122
123enum mos_regs { 123enum mos_regs {
124 THR, /* serial port regs */ 124 MOS7720_THR, /* serial port regs */
125 RHR, 125 MOS7720_RHR,
126 IER, 126 MOS7720_IER,
127 FCR, 127 MOS7720_FCR,
128 ISR, 128 MOS7720_ISR,
129 LCR, 129 MOS7720_LCR,
130 MCR, 130 MOS7720_MCR,
131 LSR, 131 MOS7720_LSR,
132 MSR, 132 MOS7720_MSR,
133 SPR, 133 MOS7720_SPR,
134 DLL, 134 MOS7720_DLL,
135 DLM, 135 MOS7720_DLM,
136 DPR, /* parallel port regs */ 136 MOS7720_DPR, /* parallel port regs */
137 DSR, 137 MOS7720_DSR,
138 DCR, 138 MOS7720_DCR,
139 ECR, 139 MOS7720_ECR,
140 SP1_REG, /* device control regs */ 140 MOS7720_SP1_REG, /* device control regs */
141 SP2_REG, /* serial port 2 (7720 only) */ 141 MOS7720_SP2_REG, /* serial port 2 (7720 only) */
142 PP_REG, 142 MOS7720_PP_REG,
143 SP_CONTROL_REG, 143 MOS7720_SP_CONTROL_REG,
144}; 144};
145 145
146/* 146/*
@@ -150,26 +150,26 @@ enum mos_regs {
150static inline __u16 get_reg_index(enum mos_regs reg) 150static inline __u16 get_reg_index(enum mos_regs reg)
151{ 151{
152 static const __u16 mos7715_index_lookup_table[] = { 152 static const __u16 mos7715_index_lookup_table[] = {
153 0x00, /* THR */ 153 0x00, /* MOS7720_THR */
154 0x00, /* RHR */ 154 0x00, /* MOS7720_RHR */
155 0x01, /* IER */ 155 0x01, /* MOS7720_IER */
156 0x02, /* FCR */ 156 0x02, /* MOS7720_FCR */
157 0x02, /* ISR */ 157 0x02, /* MOS7720_ISR */
158 0x03, /* LCR */ 158 0x03, /* MOS7720_LCR */
159 0x04, /* MCR */ 159 0x04, /* MOS7720_MCR */
160 0x05, /* LSR */ 160 0x05, /* MOS7720_LSR */
161 0x06, /* MSR */ 161 0x06, /* MOS7720_MSR */
162 0x07, /* SPR */ 162 0x07, /* MOS7720_SPR */
163 0x00, /* DLL */ 163 0x00, /* MOS7720_DLL */
164 0x01, /* DLM */ 164 0x01, /* MOS7720_DLM */
165 0x00, /* DPR */ 165 0x00, /* MOS7720_DPR */
166 0x01, /* DSR */ 166 0x01, /* MOS7720_DSR */
167 0x02, /* DCR */ 167 0x02, /* MOS7720_DCR */
168 0x0a, /* ECR */ 168 0x0a, /* MOS7720_ECR */
169 0x01, /* SP1_REG */ 169 0x01, /* MOS7720_SP1_REG */
170 0x02, /* SP2_REG (7720 only) */ 170 0x02, /* MOS7720_SP2_REG (7720 only) */
171 0x04, /* PP_REG (7715 only) */ 171 0x04, /* MOS7720_PP_REG (7715 only) */
172 0x08, /* SP_CONTROL_REG */ 172 0x08, /* MOS7720_SP_CONTROL_REG */
173 }; 173 };
174 return mos7715_index_lookup_table[reg]; 174 return mos7715_index_lookup_table[reg];
175} 175}
@@ -181,10 +181,10 @@ static inline __u16 get_reg_index(enum mos_regs reg)
181static inline __u16 get_reg_value(enum mos_regs reg, 181static inline __u16 get_reg_value(enum mos_regs reg,
182 unsigned int serial_portnum) 182 unsigned int serial_portnum)
183{ 183{
184 if (reg >= SP1_REG) /* control reg */ 184 if (reg >= MOS7720_SP1_REG) /* control reg */
185 return 0x0000; 185 return 0x0000;
186 186
187 else if (reg >= DPR) /* parallel port reg (7715 only) */ 187 else if (reg >= MOS7720_DPR) /* parallel port reg (7715 only) */
188 return 0x0100; 188 return 0x0100;
189 189
190 else /* serial port reg */ 190 else /* serial port reg */
@@ -252,7 +252,8 @@ static inline int mos7715_change_mode(struct mos7715_parport *mos_parport,
252 enum mos7715_pp_modes mode) 252 enum mos7715_pp_modes mode)
253{ 253{
254 mos_parport->shadowECR = mode; 254 mos_parport->shadowECR = mode;
255 write_mos_reg(mos_parport->serial, dummy, ECR, mos_parport->shadowECR); 255 write_mos_reg(mos_parport->serial, dummy, MOS7720_ECR,
256 mos_parport->shadowECR);
256 return 0; 257 return 0;
257} 258}
258 259
@@ -486,7 +487,7 @@ static void parport_mos7715_write_data(struct parport *pp, unsigned char d)
486 if (parport_prologue(pp) < 0) 487 if (parport_prologue(pp) < 0)
487 return; 488 return;
488 mos7715_change_mode(mos_parport, SPP); 489 mos7715_change_mode(mos_parport, SPP);
489 write_mos_reg(mos_parport->serial, dummy, DPR, (__u8)d); 490 write_mos_reg(mos_parport->serial, dummy, MOS7720_DPR, (__u8)d);
490 parport_epilogue(pp); 491 parport_epilogue(pp);
491} 492}
492 493
@@ -497,7 +498,7 @@ static unsigned char parport_mos7715_read_data(struct parport *pp)
497 498
498 if (parport_prologue(pp) < 0) 499 if (parport_prologue(pp) < 0)
499 return 0; 500 return 0;
500 read_mos_reg(mos_parport->serial, dummy, DPR, &d); 501 read_mos_reg(mos_parport->serial, dummy, MOS7720_DPR, &d);
501 parport_epilogue(pp); 502 parport_epilogue(pp);
502 return d; 503 return d;
503} 504}
@@ -510,7 +511,7 @@ static void parport_mos7715_write_control(struct parport *pp, unsigned char d)
510 if (parport_prologue(pp) < 0) 511 if (parport_prologue(pp) < 0)
511 return; 512 return;
512 data = ((__u8)d & 0x0f) | (mos_parport->shadowDCR & 0xf0); 513 data = ((__u8)d & 0x0f) | (mos_parport->shadowDCR & 0xf0);
513 write_mos_reg(mos_parport->serial, dummy, DCR, data); 514 write_mos_reg(mos_parport->serial, dummy, MOS7720_DCR, data);
514 mos_parport->shadowDCR = data; 515 mos_parport->shadowDCR = data;
515 parport_epilogue(pp); 516 parport_epilogue(pp);
516} 517}
@@ -543,7 +544,8 @@ static unsigned char parport_mos7715_frob_control(struct parport *pp,
543 if (parport_prologue(pp) < 0) 544 if (parport_prologue(pp) < 0)
544 return 0; 545 return 0;
545 mos_parport->shadowDCR = (mos_parport->shadowDCR & (~mask)) ^ val; 546 mos_parport->shadowDCR = (mos_parport->shadowDCR & (~mask)) ^ val;
546 write_mos_reg(mos_parport->serial, dummy, DCR, mos_parport->shadowDCR); 547 write_mos_reg(mos_parport->serial, dummy, MOS7720_DCR,
548 mos_parport->shadowDCR);
547 dcr = mos_parport->shadowDCR & 0x0f; 549 dcr = mos_parport->shadowDCR & 0x0f;
548 parport_epilogue(pp); 550 parport_epilogue(pp);
549 return dcr; 551 return dcr;
@@ -581,7 +583,8 @@ static void parport_mos7715_data_forward(struct parport *pp)
581 return; 583 return;
582 mos7715_change_mode(mos_parport, PS2); 584 mos7715_change_mode(mos_parport, PS2);
583 mos_parport->shadowDCR &= ~0x20; 585 mos_parport->shadowDCR &= ~0x20;
584 write_mos_reg(mos_parport->serial, dummy, DCR, mos_parport->shadowDCR); 586 write_mos_reg(mos_parport->serial, dummy, MOS7720_DCR,
587 mos_parport->shadowDCR);
585 parport_epilogue(pp); 588 parport_epilogue(pp);
586} 589}
587 590
@@ -593,7 +596,8 @@ static void parport_mos7715_data_reverse(struct parport *pp)
593 return; 596 return;
594 mos7715_change_mode(mos_parport, PS2); 597 mos7715_change_mode(mos_parport, PS2);
595 mos_parport->shadowDCR |= 0x20; 598 mos_parport->shadowDCR |= 0x20;
596 write_mos_reg(mos_parport->serial, dummy, DCR, mos_parport->shadowDCR); 599 write_mos_reg(mos_parport->serial, dummy, MOS7720_DCR,
600 mos_parport->shadowDCR);
597 parport_epilogue(pp); 601 parport_epilogue(pp);
598} 602}
599 603
@@ -633,8 +637,10 @@ static void parport_mos7715_restore_state(struct parport *pp,
633 spin_unlock(&release_lock); 637 spin_unlock(&release_lock);
634 return; 638 return;
635 } 639 }
636 write_parport_reg_nonblock(mos_parport, DCR, mos_parport->shadowDCR); 640 write_parport_reg_nonblock(mos_parport, MOS7720_DCR,
637 write_parport_reg_nonblock(mos_parport, ECR, mos_parport->shadowECR); 641 mos_parport->shadowDCR);
642 write_parport_reg_nonblock(mos_parport, MOS7720_ECR,
643 mos_parport->shadowECR);
638 spin_unlock(&release_lock); 644 spin_unlock(&release_lock);
639} 645}
640 646
@@ -714,14 +720,16 @@ static int mos7715_parport_init(struct usb_serial *serial)
714 init_completion(&mos_parport->syncmsg_compl); 720 init_completion(&mos_parport->syncmsg_compl);
715 721
716 /* cycle parallel port reset bit */ 722 /* cycle parallel port reset bit */
717 write_mos_reg(mos_parport->serial, dummy, PP_REG, (__u8)0x80); 723 write_mos_reg(mos_parport->serial, dummy, MOS7720_PP_REG, (__u8)0x80);
718 write_mos_reg(mos_parport->serial, dummy, PP_REG, (__u8)0x00); 724 write_mos_reg(mos_parport->serial, dummy, MOS7720_PP_REG, (__u8)0x00);
719 725
720 /* initialize device registers */ 726 /* initialize device registers */
721 mos_parport->shadowDCR = DCR_INIT_VAL; 727 mos_parport->shadowDCR = DCR_INIT_VAL;
722 write_mos_reg(mos_parport->serial, dummy, DCR, mos_parport->shadowDCR); 728 write_mos_reg(mos_parport->serial, dummy, MOS7720_DCR,
729 mos_parport->shadowDCR);
723 mos_parport->shadowECR = ECR_INIT_VAL; 730 mos_parport->shadowECR = ECR_INIT_VAL;
724 write_mos_reg(mos_parport->serial, dummy, ECR, mos_parport->shadowECR); 731 write_mos_reg(mos_parport->serial, dummy, MOS7720_ECR,
732 mos_parport->shadowECR);
725 733
726 /* register with parport core */ 734 /* register with parport core */
727 mos_parport->pp = parport_register_port(0, PARPORT_IRQ_NONE, 735 mos_parport->pp = parport_register_port(0, PARPORT_IRQ_NONE,
@@ -1033,45 +1041,49 @@ static int mos7720_open(struct tty_struct *tty, struct usb_serial_port *port)
1033 /* Initialize MCS7720 -- Write Init values to corresponding Registers 1041 /* Initialize MCS7720 -- Write Init values to corresponding Registers
1034 * 1042 *
1035 * Register Index 1043 * Register Index
1036 * 0 : THR/RHR 1044 * 0 : MOS7720_THR/MOS7720_RHR
1037 * 1 : IER 1045 * 1 : MOS7720_IER
1038 * 2 : FCR 1046 * 2 : MOS7720_FCR
1039 * 3 : LCR 1047 * 3 : MOS7720_LCR
1040 * 4 : MCR 1048 * 4 : MOS7720_MCR
1041 * 5 : LSR 1049 * 5 : MOS7720_LSR
1042 * 6 : MSR 1050 * 6 : MOS7720_MSR
1043 * 7 : SPR 1051 * 7 : MOS7720_SPR
1044 * 1052 *
1045 * 0x08 : SP1/2 Control Reg 1053 * 0x08 : SP1/2 Control Reg
1046 */ 1054 */
1047 port_number = port->port_number; 1055 port_number = port->port_number;
1048 read_mos_reg(serial, port_number, LSR, &data); 1056 read_mos_reg(serial, port_number, MOS7720_LSR, &data);
1049 1057
1050 dev_dbg(&port->dev, "SS::%p LSR:%x\n", mos7720_port, data); 1058 dev_dbg(&port->dev, "SS::%p LSR:%x\n", mos7720_port, data);
1051 1059
1052 write_mos_reg(serial, dummy, SP1_REG, 0x02); 1060 write_mos_reg(serial, dummy, MOS7720_SP1_REG, 0x02);
1053 write_mos_reg(serial, dummy, SP2_REG, 0x02); 1061 write_mos_reg(serial, dummy, MOS7720_SP2_REG, 0x02);
1054 1062
1055 write_mos_reg(serial, port_number, IER, 0x00); 1063 write_mos_reg(serial, port_number, MOS7720_IER, 0x00);
1056 write_mos_reg(serial, port_number, FCR, 0x00); 1064 write_mos_reg(serial, port_number, MOS7720_FCR, 0x00);
1057 1065
1058 write_mos_reg(serial, port_number, FCR, 0xcf); 1066 write_mos_reg(serial, port_number, MOS7720_FCR, 0xcf);
1059 mos7720_port->shadowLCR = 0x03; 1067 mos7720_port->shadowLCR = 0x03;
1060 write_mos_reg(serial, port_number, LCR, mos7720_port->shadowLCR); 1068 write_mos_reg(serial, port_number, MOS7720_LCR,
1069 mos7720_port->shadowLCR);
1061 mos7720_port->shadowMCR = 0x0b; 1070 mos7720_port->shadowMCR = 0x0b;
1062 write_mos_reg(serial, port_number, MCR, mos7720_port->shadowMCR); 1071 write_mos_reg(serial, port_number, MOS7720_MCR,
1072 mos7720_port->shadowMCR);
1063 1073
1064 write_mos_reg(serial, port_number, SP_CONTROL_REG, 0x00); 1074 write_mos_reg(serial, port_number, MOS7720_SP_CONTROL_REG, 0x00);
1065 read_mos_reg(serial, dummy, SP_CONTROL_REG, &data); 1075 read_mos_reg(serial, dummy, MOS7720_SP_CONTROL_REG, &data);
1066 data = data | (port->port_number + 1); 1076 data = data | (port->port_number + 1);
1067 write_mos_reg(serial, dummy, SP_CONTROL_REG, data); 1077 write_mos_reg(serial, dummy, MOS7720_SP_CONTROL_REG, data);
1068 mos7720_port->shadowLCR = 0x83; 1078 mos7720_port->shadowLCR = 0x83;
1069 write_mos_reg(serial, port_number, LCR, mos7720_port->shadowLCR); 1079 write_mos_reg(serial, port_number, MOS7720_LCR,
1070 write_mos_reg(serial, port_number, THR, 0x0c); 1080 mos7720_port->shadowLCR);
1071 write_mos_reg(serial, port_number, IER, 0x00); 1081 write_mos_reg(serial, port_number, MOS7720_THR, 0x0c);
1082 write_mos_reg(serial, port_number, MOS7720_IER, 0x00);
1072 mos7720_port->shadowLCR = 0x03; 1083 mos7720_port->shadowLCR = 0x03;
1073 write_mos_reg(serial, port_number, LCR, mos7720_port->shadowLCR); 1084 write_mos_reg(serial, port_number, MOS7720_LCR,
1074 write_mos_reg(serial, port_number, IER, 0x0c); 1085 mos7720_port->shadowLCR);
1086 write_mos_reg(serial, port_number, MOS7720_IER, 0x0c);
1075 1087
1076 response = usb_submit_urb(port->read_urb, GFP_KERNEL); 1088 response = usb_submit_urb(port->read_urb, GFP_KERNEL);
1077 if (response) 1089 if (response)
@@ -1144,8 +1156,8 @@ static void mos7720_close(struct usb_serial_port *port)
1144 usb_kill_urb(port->write_urb); 1156 usb_kill_urb(port->write_urb);
1145 usb_kill_urb(port->read_urb); 1157 usb_kill_urb(port->read_urb);
1146 1158
1147 write_mos_reg(serial, port->port_number, MCR, 0x00); 1159 write_mos_reg(serial, port->port_number, MOS7720_MCR, 0x00);
1148 write_mos_reg(serial, port->port_number, IER, 0x00); 1160 write_mos_reg(serial, port->port_number, MOS7720_IER, 0x00);
1149 1161
1150 mos7720_port->open = 0; 1162 mos7720_port->open = 0;
1151} 1163}
@@ -1169,7 +1181,8 @@ static void mos7720_break(struct tty_struct *tty, int break_state)
1169 data = mos7720_port->shadowLCR & ~UART_LCR_SBC; 1181 data = mos7720_port->shadowLCR & ~UART_LCR_SBC;
1170 1182
1171 mos7720_port->shadowLCR = data; 1183 mos7720_port->shadowLCR = data;
1172 write_mos_reg(serial, port->port_number, LCR, mos7720_port->shadowLCR); 1184 write_mos_reg(serial, port->port_number, MOS7720_LCR,
1185 mos7720_port->shadowLCR);
1173} 1186}
1174 1187
1175/* 1188/*
@@ -1297,7 +1310,7 @@ static void mos7720_throttle(struct tty_struct *tty)
1297 /* if we are implementing RTS/CTS, toggle that line */ 1310 /* if we are implementing RTS/CTS, toggle that line */
1298 if (tty->termios.c_cflag & CRTSCTS) { 1311 if (tty->termios.c_cflag & CRTSCTS) {
1299 mos7720_port->shadowMCR &= ~UART_MCR_RTS; 1312 mos7720_port->shadowMCR &= ~UART_MCR_RTS;
1300 write_mos_reg(port->serial, port->port_number, MCR, 1313 write_mos_reg(port->serial, port->port_number, MOS7720_MCR,
1301 mos7720_port->shadowMCR); 1314 mos7720_port->shadowMCR);
1302 } 1315 }
1303} 1316}
@@ -1327,7 +1340,7 @@ static void mos7720_unthrottle(struct tty_struct *tty)
1327 /* if we are implementing RTS/CTS, toggle that line */ 1340 /* if we are implementing RTS/CTS, toggle that line */
1328 if (tty->termios.c_cflag & CRTSCTS) { 1341 if (tty->termios.c_cflag & CRTSCTS) {
1329 mos7720_port->shadowMCR |= UART_MCR_RTS; 1342 mos7720_port->shadowMCR |= UART_MCR_RTS;
1330 write_mos_reg(port->serial, port->port_number, MCR, 1343 write_mos_reg(port->serial, port->port_number, MOS7720_MCR,
1331 mos7720_port->shadowMCR); 1344 mos7720_port->shadowMCR);
1332 } 1345 }
1333} 1346}
@@ -1352,35 +1365,39 @@ static int set_higher_rates(struct moschip_port *mos7720_port,
1352 dev_dbg(&port->dev, "Sending Setting Commands ..........\n"); 1365 dev_dbg(&port->dev, "Sending Setting Commands ..........\n");
1353 port_number = port->port_number; 1366 port_number = port->port_number;
1354 1367
1355 write_mos_reg(serial, port_number, IER, 0x00); 1368 write_mos_reg(serial, port_number, MOS7720_IER, 0x00);
1356 write_mos_reg(serial, port_number, FCR, 0x00); 1369 write_mos_reg(serial, port_number, MOS7720_FCR, 0x00);
1357 write_mos_reg(serial, port_number, FCR, 0xcf); 1370 write_mos_reg(serial, port_number, MOS7720_FCR, 0xcf);
1358 mos7720_port->shadowMCR = 0x0b; 1371 mos7720_port->shadowMCR = 0x0b;
1359 write_mos_reg(serial, port_number, MCR, mos7720_port->shadowMCR); 1372 write_mos_reg(serial, port_number, MOS7720_MCR,
1360 write_mos_reg(serial, dummy, SP_CONTROL_REG, 0x00); 1373 mos7720_port->shadowMCR);
1374 write_mos_reg(serial, dummy, MOS7720_SP_CONTROL_REG, 0x00);
1361 1375
1362 /*********************************************** 1376 /***********************************************
1363 * Set for higher rates * 1377 * Set for higher rates *
1364 ***********************************************/ 1378 ***********************************************/
1365 /* writing baud rate verbatum into uart clock field clearly not right */ 1379 /* writing baud rate verbatum into uart clock field clearly not right */
1366 if (port_number == 0) 1380 if (port_number == 0)
1367 sp_reg = SP1_REG; 1381 sp_reg = MOS7720_SP1_REG;
1368 else 1382 else
1369 sp_reg = SP2_REG; 1383 sp_reg = MOS7720_SP2_REG;
1370 write_mos_reg(serial, dummy, sp_reg, baud * 0x10); 1384 write_mos_reg(serial, dummy, sp_reg, baud * 0x10);
1371 write_mos_reg(serial, dummy, SP_CONTROL_REG, 0x03); 1385 write_mos_reg(serial, dummy, MOS7720_SP_CONTROL_REG, 0x03);
1372 mos7720_port->shadowMCR = 0x2b; 1386 mos7720_port->shadowMCR = 0x2b;
1373 write_mos_reg(serial, port_number, MCR, mos7720_port->shadowMCR); 1387 write_mos_reg(serial, port_number, MOS7720_MCR,
1388 mos7720_port->shadowMCR);
1374 1389
1375 /*********************************************** 1390 /***********************************************
1376 * Set DLL/DLM 1391 * Set DLL/DLM
1377 ***********************************************/ 1392 ***********************************************/
1378 mos7720_port->shadowLCR = mos7720_port->shadowLCR | UART_LCR_DLAB; 1393 mos7720_port->shadowLCR = mos7720_port->shadowLCR | UART_LCR_DLAB;
1379 write_mos_reg(serial, port_number, LCR, mos7720_port->shadowLCR); 1394 write_mos_reg(serial, port_number, MOS7720_LCR,
1380 write_mos_reg(serial, port_number, DLL, 0x01); 1395 mos7720_port->shadowLCR);
1381 write_mos_reg(serial, port_number, DLM, 0x00); 1396 write_mos_reg(serial, port_number, MOS7720_DLL, 0x01);
1397 write_mos_reg(serial, port_number, MOS7720_DLM, 0x00);
1382 mos7720_port->shadowLCR = mos7720_port->shadowLCR & ~UART_LCR_DLAB; 1398 mos7720_port->shadowLCR = mos7720_port->shadowLCR & ~UART_LCR_DLAB;
1383 write_mos_reg(serial, port_number, LCR, mos7720_port->shadowLCR); 1399 write_mos_reg(serial, port_number, MOS7720_LCR,
1400 mos7720_port->shadowLCR);
1384 1401
1385 return 0; 1402 return 0;
1386} 1403}
@@ -1488,15 +1505,16 @@ static int send_cmd_write_baud_rate(struct moschip_port *mos7720_port,
1488 1505
1489 /* Enable access to divisor latch */ 1506 /* Enable access to divisor latch */
1490 mos7720_port->shadowLCR = mos7720_port->shadowLCR | UART_LCR_DLAB; 1507 mos7720_port->shadowLCR = mos7720_port->shadowLCR | UART_LCR_DLAB;
1491 write_mos_reg(serial, number, LCR, mos7720_port->shadowLCR); 1508 write_mos_reg(serial, number, MOS7720_LCR, mos7720_port->shadowLCR);
1492 1509
1493 /* Write the divisor */ 1510 /* Write the divisor */
1494 write_mos_reg(serial, number, DLL, (__u8)(divisor & 0xff)); 1511 write_mos_reg(serial, number, MOS7720_DLL, (__u8)(divisor & 0xff));
1495 write_mos_reg(serial, number, DLM, (__u8)((divisor & 0xff00) >> 8)); 1512 write_mos_reg(serial, number, MOS7720_DLM,
1513 (__u8)((divisor & 0xff00) >> 8));
1496 1514
1497 /* Disable access to divisor latch */ 1515 /* Disable access to divisor latch */
1498 mos7720_port->shadowLCR = mos7720_port->shadowLCR & ~UART_LCR_DLAB; 1516 mos7720_port->shadowLCR = mos7720_port->shadowLCR & ~UART_LCR_DLAB;
1499 write_mos_reg(serial, number, LCR, mos7720_port->shadowLCR); 1517 write_mos_reg(serial, number, MOS7720_LCR, mos7720_port->shadowLCR);
1500 1518
1501 return status; 1519 return status;
1502} 1520}
@@ -1600,14 +1618,16 @@ static void change_port_settings(struct tty_struct *tty,
1600 1618
1601 1619
1602 /* Disable Interrupts */ 1620 /* Disable Interrupts */
1603 write_mos_reg(serial, port_number, IER, 0x00); 1621 write_mos_reg(serial, port_number, MOS7720_IER, 0x00);
1604 write_mos_reg(serial, port_number, FCR, 0x00); 1622 write_mos_reg(serial, port_number, MOS7720_FCR, 0x00);
1605 write_mos_reg(serial, port_number, FCR, 0xcf); 1623 write_mos_reg(serial, port_number, MOS7720_FCR, 0xcf);
1606 1624
1607 /* Send the updated LCR value to the mos7720 */ 1625 /* Send the updated LCR value to the mos7720 */
1608 write_mos_reg(serial, port_number, LCR, mos7720_port->shadowLCR); 1626 write_mos_reg(serial, port_number, MOS7720_LCR,
1627 mos7720_port->shadowLCR);
1609 mos7720_port->shadowMCR = 0x0b; 1628 mos7720_port->shadowMCR = 0x0b;
1610 write_mos_reg(serial, port_number, MCR, mos7720_port->shadowMCR); 1629 write_mos_reg(serial, port_number, MOS7720_MCR,
1630 mos7720_port->shadowMCR);
1611 1631
1612 /* set up the MCR register and send it to the mos7720 */ 1632 /* set up the MCR register and send it to the mos7720 */
1613 mos7720_port->shadowMCR = UART_MCR_OUT2; 1633 mos7720_port->shadowMCR = UART_MCR_OUT2;
@@ -1619,14 +1639,17 @@ static void change_port_settings(struct tty_struct *tty,
1619 /* To set hardware flow control to the specified * 1639 /* To set hardware flow control to the specified *
1620 * serial port, in SP1/2_CONTROL_REG */ 1640 * serial port, in SP1/2_CONTROL_REG */
1621 if (port_number) 1641 if (port_number)
1622 write_mos_reg(serial, dummy, SP_CONTROL_REG, 0x01); 1642 write_mos_reg(serial, dummy, MOS7720_SP_CONTROL_REG,
1643 0x01);
1623 else 1644 else
1624 write_mos_reg(serial, dummy, SP_CONTROL_REG, 0x02); 1645 write_mos_reg(serial, dummy, MOS7720_SP_CONTROL_REG,
1646 0x02);
1625 1647
1626 } else 1648 } else
1627 mos7720_port->shadowMCR &= ~(UART_MCR_XONANY); 1649 mos7720_port->shadowMCR &= ~(UART_MCR_XONANY);
1628 1650
1629 write_mos_reg(serial, port_number, MCR, mos7720_port->shadowMCR); 1651 write_mos_reg(serial, port_number, MOS7720_MCR,
1652 mos7720_port->shadowMCR);
1630 1653
1631 /* Determine divisor based on baud rate */ 1654 /* Determine divisor based on baud rate */
1632 baud = tty_get_baud_rate(tty); 1655 baud = tty_get_baud_rate(tty);
@@ -1639,7 +1662,7 @@ static void change_port_settings(struct tty_struct *tty,
1639 if (baud >= 230400) { 1662 if (baud >= 230400) {
1640 set_higher_rates(mos7720_port, baud); 1663 set_higher_rates(mos7720_port, baud);
1641 /* Enable Interrupts */ 1664 /* Enable Interrupts */
1642 write_mos_reg(serial, port_number, IER, 0x0c); 1665 write_mos_reg(serial, port_number, MOS7720_IER, 0x0c);
1643 return; 1666 return;
1644 } 1667 }
1645 1668
@@ -1650,7 +1673,7 @@ static void change_port_settings(struct tty_struct *tty,
1650 if (cflag & CBAUD) 1673 if (cflag & CBAUD)
1651 tty_encode_baud_rate(tty, baud, baud); 1674 tty_encode_baud_rate(tty, baud, baud);
1652 /* Enable Interrupts */ 1675 /* Enable Interrupts */
1653 write_mos_reg(serial, port_number, IER, 0x0c); 1676 write_mos_reg(serial, port_number, MOS7720_IER, 0x0c);
1654 1677
1655 if (port->read_urb->status != -EINPROGRESS) { 1678 if (port->read_urb->status != -EINPROGRESS) {
1656 status = usb_submit_urb(port->read_urb, GFP_KERNEL); 1679 status = usb_submit_urb(port->read_urb, GFP_KERNEL);
@@ -1725,7 +1748,7 @@ static int get_lsr_info(struct tty_struct *tty,
1725 1748
1726 count = mos7720_chars_in_buffer(tty); 1749 count = mos7720_chars_in_buffer(tty);
1727 if (count == 0) { 1750 if (count == 0) {
1728 read_mos_reg(port->serial, port_number, LSR, &data); 1751 read_mos_reg(port->serial, port_number, MOS7720_LSR, &data);
1729 if ((data & (UART_LSR_TEMT | UART_LSR_THRE)) 1752 if ((data & (UART_LSR_TEMT | UART_LSR_THRE))
1730 == (UART_LSR_TEMT | UART_LSR_THRE)) { 1753 == (UART_LSR_TEMT | UART_LSR_THRE)) {
1731 dev_dbg(&port->dev, "%s -- Empty\n", __func__); 1754 dev_dbg(&port->dev, "%s -- Empty\n", __func__);
@@ -1782,7 +1805,7 @@ static int mos7720_tiocmset(struct tty_struct *tty,
1782 mcr &= ~UART_MCR_LOOP; 1805 mcr &= ~UART_MCR_LOOP;
1783 1806
1784 mos7720_port->shadowMCR = mcr; 1807 mos7720_port->shadowMCR = mcr;
1785 write_mos_reg(port->serial, port->port_number, MCR, 1808 write_mos_reg(port->serial, port->port_number, MOS7720_MCR,
1786 mos7720_port->shadowMCR); 1809 mos7720_port->shadowMCR);
1787 1810
1788 return 0; 1811 return 0;
@@ -1827,7 +1850,7 @@ static int set_modem_info(struct moschip_port *mos7720_port, unsigned int cmd,
1827 } 1850 }
1828 1851
1829 mos7720_port->shadowMCR = mcr; 1852 mos7720_port->shadowMCR = mcr;
1830 write_mos_reg(port->serial, port->port_number, MCR, 1853 write_mos_reg(port->serial, port->port_number, MOS7720_MCR,
1831 mos7720_port->shadowMCR); 1854 mos7720_port->shadowMCR);
1832 1855
1833 return 0; 1856 return 0;
@@ -1942,7 +1965,7 @@ static int mos7720_startup(struct usb_serial *serial)
1942 } 1965 }
1943#endif 1966#endif
1944 /* LSR For Port 1 */ 1967 /* LSR For Port 1 */
1945 read_mos_reg(serial, 0, LSR, &data); 1968 read_mos_reg(serial, 0, MOS7720_LSR, &data);
1946 dev_dbg(&dev->dev, "LSR:%x\n", data); 1969 dev_dbg(&dev->dev, "LSR:%x\n", data);
1947 1970
1948 return 0; 1971 return 0;
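
The long mos7720.c diff is mechanical: every enumerator in enum mos_regs gains a MOS7720_ prefix, presumably to keep short generic names such as MSR, ISR and IER from colliding with identically named macros elsewhere in the tree, and the call sites are re-wrapped to stay within the line limit. One detail worth keeping in mind is that the enum doubles as an index into the per-chip lookup tables, so enumerator order and table order must stay in lockstep; designated initializers (not used by the driver, shown only as an illustration) make that coupling explicit:

    enum mos7720_regs { MOS7720_THR, MOS7720_RHR, MOS7720_IER /* ... */ };

    static const unsigned short mos7715_index[] = {
            [MOS7720_THR] = 0x00,
            [MOS7720_RHR] = 0x00,
            [MOS7720_IER] = 0x01,
    };
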
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index f0c0c53359ad..876423b8892c 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1099,6 +1099,8 @@ static const struct usb_device_id option_ids[] = {
1099 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */ 1099 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
1100 { USB_DEVICE_INTERFACE_CLASS(SIERRA_VENDOR_ID, 0x68c0, 0xff), 1100 { USB_DEVICE_INTERFACE_CLASS(SIERRA_VENDOR_ID, 0x68c0, 0xff),
1101 .driver_info = (kernel_ulong_t)&sierra_mc73xx_blacklist }, /* MC73xx */ 1101 .driver_info = (kernel_ulong_t)&sierra_mc73xx_blacklist }, /* MC73xx */
1102 { USB_DEVICE_INTERFACE_CLASS(SIERRA_VENDOR_ID, 0x9041, 0xff),
1103 .driver_info = (kernel_ulong_t)&sierra_mc73xx_blacklist }, /* MC7305/MC7355 */
1102 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, 1104 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
1103 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, 1105 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
1104 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003), 1106 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
@@ -1765,6 +1767,7 @@ static const struct usb_device_id option_ids[] = {
1765 { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) }, 1767 { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
1766 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */ 1768 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
1767 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */ 1769 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
1770 { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */
1768 { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) }, 1771 { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
1769 { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) }, 1772 { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },
1770 { } /* Terminating entry */ 1773 { } /* Terminating entry */
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 9c63897b3a56..d156545728c2 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -145,7 +145,6 @@ static const struct usb_device_id id_table[] = {
145 {DEVICE_SWI(0x1199, 0x901c)}, /* Sierra Wireless EM7700 */ 145 {DEVICE_SWI(0x1199, 0x901c)}, /* Sierra Wireless EM7700 */
146 {DEVICE_SWI(0x1199, 0x901f)}, /* Sierra Wireless EM7355 */ 146 {DEVICE_SWI(0x1199, 0x901f)}, /* Sierra Wireless EM7355 */
147 {DEVICE_SWI(0x1199, 0x9040)}, /* Sierra Wireless Modem */ 147 {DEVICE_SWI(0x1199, 0x9040)}, /* Sierra Wireless Modem */
148 {DEVICE_SWI(0x1199, 0x9041)}, /* Sierra Wireless MC7305/MC7355 */
149 {DEVICE_SWI(0x1199, 0x9051)}, /* Netgear AirCard 340U */ 148 {DEVICE_SWI(0x1199, 0x9051)}, /* Netgear AirCard 340U */
150 {DEVICE_SWI(0x1199, 0x9053)}, /* Sierra Wireless Modem */ 149 {DEVICE_SWI(0x1199, 0x9053)}, /* Sierra Wireless Modem */
151 {DEVICE_SWI(0x1199, 0x9054)}, /* Sierra Wireless Modem */ 150 {DEVICE_SWI(0x1199, 0x9054)}, /* Sierra Wireless Modem */
@@ -158,6 +157,7 @@ static const struct usb_device_id id_table[] = {
158 {DEVICE_SWI(0x413c, 0x81a4)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */ 157 {DEVICE_SWI(0x413c, 0x81a4)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
159 {DEVICE_SWI(0x413c, 0x81a8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */ 158 {DEVICE_SWI(0x413c, 0x81a8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
160 {DEVICE_SWI(0x413c, 0x81a9)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */ 159 {DEVICE_SWI(0x413c, 0x81a9)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
160 {DEVICE_SWI(0x413c, 0x81b1)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
161 161
162 /* Huawei devices */ 162 /* Huawei devices */
163 {DEVICE_HWI(0x03f0, 0x581d)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */ 163 {DEVICE_HWI(0x03f0, 0x581d)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 46179a0828eb..07d1ecd564f7 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -289,6 +289,7 @@ static const struct usb_device_id id_table[] = {
289 { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68AA, 0xFF, 0xFF, 0xFF), 289 { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68AA, 0xFF, 0xFF, 0xFF),
290 .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist 290 .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
291 }, 291 },
292 { USB_DEVICE(0x1199, 0x68AB) }, /* Sierra Wireless AR8550 */
292 /* AT&T Direct IP LTE modems */ 293 /* AT&T Direct IP LTE modems */
293 { USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68AA, 0xFF, 0xFF, 0xFF), 294 { USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68AA, 0xFF, 0xFF, 0xFF),
294 .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist 295 .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 529066bbc7e8..46f1f13b41f1 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -1306,6 +1306,7 @@ static void __exit usb_serial_exit(void)
1306 tty_unregister_driver(usb_serial_tty_driver); 1306 tty_unregister_driver(usb_serial_tty_driver);
1307 put_tty_driver(usb_serial_tty_driver); 1307 put_tty_driver(usb_serial_tty_driver);
1308 bus_unregister(&usb_serial_bus_type); 1308 bus_unregister(&usb_serial_bus_type);
1309 idr_destroy(&serial_minors);
1309} 1310}
1310 1311
1311 1312
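
The one-line usb-serial.c change releases the serial_minors IDR itself on module unload; removing the individual minors frees the entries but not the structure's internal bookkeeping. The usual lifecycle, sketched with the kernel IDR API (names here are invented):

    #include <linux/idr.h>

    static DEFINE_IDR(sketch_minors);

    static int sketch_alloc_minor(void *owner)
    {
            /* returns an unused id in [0, 255), or a negative errno */
            return idr_alloc(&sketch_minors, owner, 0, 255, GFP_KERNEL);
    }

    static void sketch_exit(void)
    {
            /* idr_remove() has already dropped each id by now;
             * idr_destroy() frees what the IDR keeps cached internally. */
            idr_destroy(&sketch_minors);
    }
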
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index caf188800c67..6b2479123de7 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -2065,6 +2065,18 @@ UNUSUAL_DEV( 0x1908, 0x3335, 0x0200, 0x0200,
2065 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 2065 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
2066 US_FL_NO_READ_DISC_INFO ), 2066 US_FL_NO_READ_DISC_INFO ),
2067 2067
2068/* Reported by Oliver Neukum <oneukum@suse.com>
2069 * This device morphes spontaneously into another device if the access
2070 * pattern of Windows isn't followed. Thus writable media would be dirty
2071 * if the initial instance is used. So the device is limited to its
2072 * virtual CD.
2073 * And yes, the concept that BCD goes up to 9 is not heeded */
2074UNUSUAL_DEV( 0x19d2, 0x1225, 0x0000, 0xffff,
2075 "ZTE,Incorporated",
2076 "ZTE WCDMA Technologies MSM",
2077 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
2078 US_FL_SINGLE_LUN ),
2079
2068/* Reported by Sven Geggus <sven-usbst@geggus.net> 2080/* Reported by Sven Geggus <sven-usbst@geggus.net>
2069 * This encrypted pen drive returns bogus data for the initial READ(10). 2081 * This encrypted pen drive returns bogus data for the initial READ(10).
2070 */ 2082 */
@@ -2074,6 +2086,17 @@ UNUSUAL_DEV( 0x1b1c, 0x1ab5, 0x0200, 0x0200,
2074 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 2086 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
2075 US_FL_INITIAL_READ10 ), 2087 US_FL_INITIAL_READ10 ),
2076 2088
2089/* Reported by Hans de Goede <hdegoede@redhat.com>
2090 * These are mini projectors using USB for both power and video data transport
2091 * The usb-storage interface is a virtual windows driver CD, which the gm12u320
2092 * driver automatically converts into framebuffer & kms dri device nodes.
2093 */
2094UNUSUAL_DEV( 0x1de1, 0xc102, 0x0000, 0xffff,
2095 "Grain-media Technology Corp.",
2096 "USB3.0 Device GM12U320",
2097 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
2098 US_FL_IGNORE_DEVICE ),
2099
2077/* Patch by Richard Schütz <r.schtz@t-online.de> 2100/* Patch by Richard Schütz <r.schtz@t-online.de>
2078 * This external hard drive enclosure uses a JMicron chip which 2101 * This external hard drive enclosure uses a JMicron chip which
2079 * needs the US_FL_IGNORE_RESIDUE flag to work properly. */ 2102 * needs the US_FL_IGNORE_RESIDUE flag to work properly. */
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index 2fb29dfeffbd..563c510f285c 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -689,6 +689,23 @@ struct vfio_device *vfio_device_get_from_dev(struct device *dev)
689} 689}
690EXPORT_SYMBOL_GPL(vfio_device_get_from_dev); 690EXPORT_SYMBOL_GPL(vfio_device_get_from_dev);
691 691
692static struct vfio_device *vfio_device_get_from_name(struct vfio_group *group,
693 char *buf)
694{
695 struct vfio_device *device;
696
697 mutex_lock(&group->device_lock);
698 list_for_each_entry(device, &group->device_list, group_next) {
699 if (!strcmp(dev_name(device->dev), buf)) {
700 vfio_device_get(device);
701 break;
702 }
703 }
704 mutex_unlock(&group->device_lock);
705
706 return device;
707}
708
692/* 709/*
693 * Caller must hold a reference to the vfio_device 710 * Caller must hold a reference to the vfio_device
694 */ 711 */
@@ -1198,53 +1215,53 @@ static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
1198{ 1215{
1199 struct vfio_device *device; 1216 struct vfio_device *device;
1200 struct file *filep; 1217 struct file *filep;
1201 int ret = -ENODEV; 1218 int ret;
1202 1219
1203 if (0 == atomic_read(&group->container_users) || 1220 if (0 == atomic_read(&group->container_users) ||
1204 !group->container->iommu_driver || !vfio_group_viable(group)) 1221 !group->container->iommu_driver || !vfio_group_viable(group))
1205 return -EINVAL; 1222 return -EINVAL;
1206 1223
1207 mutex_lock(&group->device_lock); 1224 device = vfio_device_get_from_name(group, buf);
1208 list_for_each_entry(device, &group->device_list, group_next) { 1225 if (!device)
1209 if (strcmp(dev_name(device->dev), buf)) 1226 return -ENODEV;
1210 continue;
1211 1227
1212 ret = device->ops->open(device->device_data); 1228 ret = device->ops->open(device->device_data);
1213 if (ret) 1229 if (ret) {
1214 break; 1230 vfio_device_put(device);
1215 /* 1231 return ret;
1216 * We can't use anon_inode_getfd() because we need to modify 1232 }
1217 * the f_mode flags directly to allow more than just ioctls
1218 */
1219 ret = get_unused_fd_flags(O_CLOEXEC);
1220 if (ret < 0) {
1221 device->ops->release(device->device_data);
1222 break;
1223 }
1224 1233
1225 filep = anon_inode_getfile("[vfio-device]", &vfio_device_fops, 1234 /*
1226 device, O_RDWR); 1235 * We can't use anon_inode_getfd() because we need to modify
1227 if (IS_ERR(filep)) { 1236 * the f_mode flags directly to allow more than just ioctls
1228 put_unused_fd(ret); 1237 */
1229 ret = PTR_ERR(filep); 1238 ret = get_unused_fd_flags(O_CLOEXEC);
1230 device->ops->release(device->device_data); 1239 if (ret < 0) {
1231 break; 1240 device->ops->release(device->device_data);
1232 } 1241 vfio_device_put(device);
1242 return ret;
1243 }
1233 1244
1234 /* 1245 filep = anon_inode_getfile("[vfio-device]", &vfio_device_fops,
1235 * TODO: add an anon_inode interface to do this. 1246 device, O_RDWR);
1236 * Appears to be missing by lack of need rather than 1247 if (IS_ERR(filep)) {
1237 * explicitly prevented. Now there's need. 1248 put_unused_fd(ret);
1238 */ 1249 ret = PTR_ERR(filep);
1239 filep->f_mode |= (FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE); 1250 device->ops->release(device->device_data);
1251 vfio_device_put(device);
1252 return ret;
1253 }
1254
1255 /*
1256 * TODO: add an anon_inode interface to do this.
1257 * Appears to be missing by lack of need rather than
1258 * explicitly prevented. Now there's need.
1259 */
1260 filep->f_mode |= (FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
1240 1261
1241 vfio_device_get(device); 1262 atomic_inc(&group->container_users);
1242 atomic_inc(&group->container_users);
1243 1263
1244 fd_install(ret, filep); 1264 fd_install(ret, filep);
1245 break;
1246 }
1247 mutex_unlock(&group->device_lock);
1248 1265
1249 return ret; 1266 return ret;
1250} 1267}
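
The vfio refactor separates "find the device by name" from "build a file descriptor for it": the lookup walks group->device_list and takes a reference while holding group->device_lock, and everything that can sleep or fail (the driver open, fd allocation, the anonymous inode) now runs outside that lock, dropping the reference on every error path. The locking half of the pattern, as a loose sketch (field and helper names only mirror the vfio code):

    /* Look up an entry by name and return it with a reference held. */
    static struct vfio_device *sketch_find_and_get(struct vfio_group *group,
                                                   const char *name)
    {
            struct vfio_device *dev, *found = NULL;

            mutex_lock(&group->device_lock);
            list_for_each_entry(dev, &group->device_list, group_next) {
                    if (!strcmp(dev_name(dev->dev), name)) {
                            vfio_device_get(dev);   /* pin before unlocking */
                            found = dev;
                            break;
                    }
            }
            mutex_unlock(&group->device_lock);

            return found;   /* caller works unlocked and puts the ref on error */
    }

In the sketch, a separate found pointer initialized to NULL is what keeps the "no match" case unambiguous for the caller.
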
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 9e8e004bb1c3..eec2f11809ff 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -22,14 +22,20 @@
22#include <linux/file.h> 22#include <linux/file.h>
23#include <linux/highmem.h> 23#include <linux/highmem.h>
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <linux/vmalloc.h>
25#include <linux/kthread.h> 26#include <linux/kthread.h>
26#include <linux/cgroup.h> 27#include <linux/cgroup.h>
27#include <linux/module.h> 28#include <linux/module.h>
29#include <linux/sort.h>
28 30
29#include "vhost.h" 31#include "vhost.h"
30 32
33static ushort max_mem_regions = 64;
34module_param(max_mem_regions, ushort, 0444);
35MODULE_PARM_DESC(max_mem_regions,
36 "Maximum number of memory regions in memory map. (default: 64)");
37
31enum { 38enum {
32 VHOST_MEMORY_MAX_NREGIONS = 64,
33 VHOST_MEMORY_F_LOG = 0x1, 39 VHOST_MEMORY_F_LOG = 0x1,
34}; 40};
35 41
@@ -543,7 +549,7 @@ void vhost_dev_cleanup(struct vhost_dev *dev, bool locked)
543 fput(dev->log_file); 549 fput(dev->log_file);
544 dev->log_file = NULL; 550 dev->log_file = NULL;
545 /* No one will access memory at this point */ 551 /* No one will access memory at this point */
546 kfree(dev->memory); 552 kvfree(dev->memory);
547 dev->memory = NULL; 553 dev->memory = NULL;
548 WARN_ON(!list_empty(&dev->work_list)); 554 WARN_ON(!list_empty(&dev->work_list));
549 if (dev->worker) { 555 if (dev->worker) {
@@ -663,6 +669,25 @@ int vhost_vq_access_ok(struct vhost_virtqueue *vq)
663} 669}
664EXPORT_SYMBOL_GPL(vhost_vq_access_ok); 670EXPORT_SYMBOL_GPL(vhost_vq_access_ok);
665 671
672static int vhost_memory_reg_sort_cmp(const void *p1, const void *p2)
673{
674 const struct vhost_memory_region *r1 = p1, *r2 = p2;
675 if (r1->guest_phys_addr < r2->guest_phys_addr)
676 return 1;
677 if (r1->guest_phys_addr > r2->guest_phys_addr)
678 return -1;
679 return 0;
680}
681
682static void *vhost_kvzalloc(unsigned long size)
683{
684 void *n = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
685
686 if (!n)
687 n = vzalloc(size);
688 return n;
689}
690
666static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m) 691static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
667{ 692{
668 struct vhost_memory mem, *newmem, *oldmem; 693 struct vhost_memory mem, *newmem, *oldmem;
@@ -673,21 +698,23 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
673 return -EFAULT; 698 return -EFAULT;
674 if (mem.padding) 699 if (mem.padding)
675 return -EOPNOTSUPP; 700 return -EOPNOTSUPP;
676 if (mem.nregions > VHOST_MEMORY_MAX_NREGIONS) 701 if (mem.nregions > max_mem_regions)
677 return -E2BIG; 702 return -E2BIG;
678 newmem = kmalloc(size + mem.nregions * sizeof *m->regions, GFP_KERNEL); 703 newmem = vhost_kvzalloc(size + mem.nregions * sizeof(*m->regions));
679 if (!newmem) 704 if (!newmem)
680 return -ENOMEM; 705 return -ENOMEM;
681 706
682 memcpy(newmem, &mem, size); 707 memcpy(newmem, &mem, size);
683 if (copy_from_user(newmem->regions, m->regions, 708 if (copy_from_user(newmem->regions, m->regions,
684 mem.nregions * sizeof *m->regions)) { 709 mem.nregions * sizeof *m->regions)) {
685 kfree(newmem); 710 kvfree(newmem);
686 return -EFAULT; 711 return -EFAULT;
687 } 712 }
713 sort(newmem->regions, newmem->nregions, sizeof(*newmem->regions),
714 vhost_memory_reg_sort_cmp, NULL);
688 715
689 if (!memory_access_ok(d, newmem, 0)) { 716 if (!memory_access_ok(d, newmem, 0)) {
690 kfree(newmem); 717 kvfree(newmem);
691 return -EFAULT; 718 return -EFAULT;
692 } 719 }
693 oldmem = d->memory; 720 oldmem = d->memory;
@@ -699,7 +726,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
699 d->vqs[i]->memory = newmem; 726 d->vqs[i]->memory = newmem;
700 mutex_unlock(&d->vqs[i]->mutex); 727 mutex_unlock(&d->vqs[i]->mutex);
701 } 728 }
702 kfree(oldmem); 729 kvfree(oldmem);
703 return 0; 730 return 0;
704} 731}
705 732
@@ -965,6 +992,7 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
965 } 992 }
966 if (eventfp != d->log_file) { 993 if (eventfp != d->log_file) {
967 filep = d->log_file; 994 filep = d->log_file;
995 d->log_file = eventfp;
968 ctx = d->log_ctx; 996 ctx = d->log_ctx;
969 d->log_ctx = eventfp ? 997 d->log_ctx = eventfp ?
970 eventfd_ctx_fileget(eventfp) : NULL; 998 eventfd_ctx_fileget(eventfp) : NULL;
@@ -992,17 +1020,22 @@ EXPORT_SYMBOL_GPL(vhost_dev_ioctl);
992static const struct vhost_memory_region *find_region(struct vhost_memory *mem, 1020static const struct vhost_memory_region *find_region(struct vhost_memory *mem,
993 __u64 addr, __u32 len) 1021 __u64 addr, __u32 len)
994{ 1022{
995 struct vhost_memory_region *reg; 1023 const struct vhost_memory_region *reg;
996 int i; 1024 int start = 0, end = mem->nregions;
997 1025
998 /* linear search is not brilliant, but we really have on the order of 6 1026 while (start < end) {
999 * regions in practice */ 1027 int slot = start + (end - start) / 2;
1000 for (i = 0; i < mem->nregions; ++i) { 1028 reg = mem->regions + slot;
1001 reg = mem->regions + i; 1029 if (addr >= reg->guest_phys_addr)
1002 if (reg->guest_phys_addr <= addr && 1030 end = slot;
1003 reg->guest_phys_addr + reg->memory_size - 1 >= addr) 1031 else
1004 return reg; 1032 start = slot + 1;
1005 } 1033 }
1034
1035 reg = mem->regions + start;
1036 if (addr >= reg->guest_phys_addr &&
1037 reg->guest_phys_addr + reg->memory_size > addr)
1038 return reg;
1006 return NULL; 1039 return NULL;
1007} 1040}
1008 1041
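
find_region() drops its linear scan in favour of a binary search over the table that vhost_set_memory() just sorted: with regions in descending guest_phys_addr order, the loop narrows down to the first region whose base is at or below the address, and a final range check confirms the address really lies inside it. A stand-alone version of the same search, with userspace types and an extra bounds guard for the "address below every region" case:

    #include <stddef.h>
    #include <stdint.h>

    struct region {
            uint64_t gpa;   /* guest physical base */
            uint64_t size;
    };

    /* regions[] must be sorted by gpa in descending order. */
    static const struct region *sketch_find_region(const struct region *regions,
                                                   size_t nregions, uint64_t addr)
    {
            size_t start = 0, end = nregions;

            while (start < end) {
                    size_t slot = start + (end - start) / 2;

                    if (addr >= regions[slot].gpa)
                            end = slot;             /* candidate, look earlier */
                    else
                            start = slot + 1;
            }

            if (start < nregions &&
                addr >= regions[start].gpa &&
                addr - regions[start].gpa < regions[start].size)
                    return &regions[start];
            return NULL;
    }
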
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 658c34bb9076..1aaf89300621 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -1306,10 +1306,11 @@ static void fbcon_cursor(struct vc_data *vc, int mode)
1306 int y; 1306 int y;
1307 int c = scr_readw((u16 *) vc->vc_pos); 1307 int c = scr_readw((u16 *) vc->vc_pos);
1308 1308
1309 ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms);
1310
1309 if (fbcon_is_inactive(vc, info) || vc->vc_deccm != 1) 1311 if (fbcon_is_inactive(vc, info) || vc->vc_deccm != 1)
1310 return; 1312 return;
1311 1313
1312 ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms);
1313 if (vc->vc_cursor_type & 0x10) 1314 if (vc->vc_cursor_type & 0x10)
1314 fbcon_del_cursor_timer(info); 1315 fbcon_del_cursor_timer(info);
1315 else 1316 else
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index 2d98de535e0f..f888561568d9 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -298,7 +298,7 @@ config FB_ARMCLCD
298 298
299# Helper logic selected only by the ARM Versatile platform family. 299# Helper logic selected only by the ARM Versatile platform family.
300config PLAT_VERSATILE_CLCD 300config PLAT_VERSATILE_CLCD
301 def_bool ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS 301 def_bool ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS || ARCH_INTEGRATOR
302 depends on ARM 302 depends on ARM
303 depends on FB_ARMCLCD && FB=y 303 depends on FB_ARMCLCD && FB=y
304 304
diff --git a/drivers/video/fbdev/omap2/dss/dss-of.c b/drivers/video/fbdev/omap2/dss/dss-of.c
index 928ee639c0c1..bf407b6ba15c 100644
--- a/drivers/video/fbdev/omap2/dss/dss-of.c
+++ b/drivers/video/fbdev/omap2/dss/dss-of.c
@@ -60,6 +60,8 @@ omapdss_of_get_next_port(const struct device_node *parent,
60 } 60 }
61 prev = port; 61 prev = port;
62 } while (of_node_cmp(port->name, "port") != 0); 62 } while (of_node_cmp(port->name, "port") != 0);
63
64 of_node_put(ports);
63 } 65 }
64 66
65 return port; 67 return port;
@@ -94,7 +96,7 @@ struct device_node *dss_of_port_get_parent_device(struct device_node *port)
94 if (!port) 96 if (!port)
95 return NULL; 97 return NULL;
96 98
97 np = of_get_next_parent(port); 99 np = of_get_parent(port);
98 100
99 for (i = 0; i < 2 && np; ++i) { 101 for (i = 0; i < 2 && np; ++i) {
100 struct property *prop; 102 struct property *prop;
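
Both dss-of.c fixes are device-tree reference-count corrections: the ports node is now released with of_node_put() once the port loop is done, and of_get_next_parent() is replaced by of_get_parent() because the former drops the reference on the node it is given, while this caller still needs its reference on port. A hedged sketch of the distinction (helper name invented):

    #include <linux/of.h>

    /* Walk up one level without giving up our reference on the child. */
    static struct device_node *sketch_parent_of(struct device_node *child)
    {
            /* +1 on the parent, child's refcount untouched */
            struct device_node *parent = of_get_parent(child);

            /*
             * of_get_next_parent(child) would also return the parent, but it
             * calls of_node_put(child) internally; it suits walking upward
             * when the child reference is no longer needed.
             */
            return parent;  /* caller must of_node_put(parent) when done */
    }
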
diff --git a/drivers/video/fbdev/pxa3xx-gcu.c b/drivers/video/fbdev/pxa3xx-gcu.c
index 86bd457d039d..50bce45e7f3d 100644
--- a/drivers/video/fbdev/pxa3xx-gcu.c
+++ b/drivers/video/fbdev/pxa3xx-gcu.c
@@ -653,7 +653,7 @@ static int pxa3xx_gcu_probe(struct platform_device *pdev)
653 goto err_free_dma; 653 goto err_free_dma;
654 } 654 }
655 655
656 ret = clk_enable(priv->clk); 656 ret = clk_prepare_enable(priv->clk);
657 if (ret < 0) { 657 if (ret < 0) {
658 dev_err(dev, "failed to enable clock\n"); 658 dev_err(dev, "failed to enable clock\n");
659 goto err_misc_deregister; 659 goto err_misc_deregister;
@@ -685,7 +685,7 @@ err_misc_deregister:
685 misc_deregister(&priv->misc_dev); 685 misc_deregister(&priv->misc_dev);
686 686
687err_disable_clk: 687err_disable_clk:
688 clk_disable(priv->clk); 688 clk_disable_unprepare(priv->clk);
689 689
690 return ret; 690 return ret;
691} 691}
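
The pxa3xx-gcu change is the standard common-clock-framework pairing: a clock has to be prepared (which may sleep) before it can be enabled, so probe and error paths running in process context normally use the combined helpers, kept symmetrical as in this sketch:

    #include <linux/clk.h>

    static int sketch_clock_on(struct clk *clk)
    {
            /* prepare may sleep; enable alone is safe in atomic context */
            return clk_prepare_enable(clk);
    }

    static void sketch_clock_off(struct clk *clk)
    {
            clk_disable_unprepare(clk);
    }
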
diff --git a/drivers/video/of_videomode.c b/drivers/video/of_videomode.c
index 111c2d1911d3..b5102aa6090d 100644
--- a/drivers/video/of_videomode.c
+++ b/drivers/video/of_videomode.c
@@ -44,11 +44,9 @@ int of_get_videomode(struct device_node *np, struct videomode *vm,
44 index = disp->native_mode; 44 index = disp->native_mode;
45 45
46 ret = videomode_from_timings(disp, vm, index); 46 ret = videomode_from_timings(disp, vm, index);
47 if (ret)
48 return ret;
49 47
50 display_timings_release(disp); 48 display_timings_release(disp);
51 49
52 return 0; 50 return ret;
53} 51}
54EXPORT_SYMBOL_GPL(of_get_videomode); 52EXPORT_SYMBOL_GPL(of_get_videomode);
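
The of_videomode change above makes display_timings_release() run on the error path too, instead of returning early and leaking the parsed timings. A hedged sketch of the resulting shape (my_get_videomode() is a hypothetical wrapper; the display-timing helpers are the real ones):

/*
 * Sketch of the "release once, then return whatever the helper returned"
 * shape.  my_get_videomode() is hypothetical; of_get_display_timings(),
 * videomode_from_timings() and display_timings_release() are the real helpers.
 */
#include <linux/errno.h>
#include <linux/of.h>
#include <video/display_timing.h>
#include <video/of_display_timing.h>
#include <video/videomode.h>

int my_get_videomode(struct device_node *np, struct videomode *vm, int index)
{
	struct display_timings *disp;
	int ret;

	disp = of_get_display_timings(np);
	if (!disp)
		return -EINVAL;

	if (index < 0)
		index = disp->native_mode;

	ret = videomode_from_timings(disp, vm, index);

	/* free the parsed timings on success and failure alike */
	display_timings_release(disp);

	return ret;
}
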
diff --git a/drivers/virtio/virtio_input.c b/drivers/virtio/virtio_input.c
index 60e2a1677563..c96944b59856 100644
--- a/drivers/virtio/virtio_input.c
+++ b/drivers/virtio/virtio_input.c
@@ -313,6 +313,7 @@ err_init_vq:
313static void virtinput_remove(struct virtio_device *vdev) 313static void virtinput_remove(struct virtio_device *vdev)
314{ 314{
315 struct virtio_input *vi = vdev->priv; 315 struct virtio_input *vi = vdev->priv;
316 void *buf;
316 unsigned long flags; 317 unsigned long flags;
317 318
318 spin_lock_irqsave(&vi->lock, flags); 319 spin_lock_irqsave(&vi->lock, flags);
@@ -320,6 +321,9 @@ static void virtinput_remove(struct virtio_device *vdev)
320 spin_unlock_irqrestore(&vi->lock, flags); 321 spin_unlock_irqrestore(&vi->lock, flags);
321 322
322 input_unregister_device(vi->idev); 323 input_unregister_device(vi->idev);
324 vdev->config->reset(vdev);
325 while ((buf = virtqueue_detach_unused_buf(vi->sts)) != NULL)
326 kfree(buf);
323 vdev->config->del_vqs(vdev); 327 vdev->config->del_vqs(vdev);
324 kfree(vi); 328 kfree(vi);
325} 329}
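
The virtio-input change above makes the remove path reset the device and reclaim unused buffers before the virtqueues are deleted. A sketch of that teardown order for a hypothetical virtio driver (struct myvirt and its single queue are made up; the virtio calls are the real API):

/*
 * Teardown order sketched above:
 *   1. reset the device so it stops using the queues,
 *   2. detach and free buffers it never consumed,
 *   3. only then delete the virtqueues and the driver state.
 * struct myvirt is hypothetical.
 */
#include <linux/slab.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>

struct myvirt {
	struct virtqueue *vq;
};

static void myvirt_remove(struct virtio_device *vdev)
{
	struct myvirt *mv = vdev->priv;
	void *buf;

	/* 1. quiesce the device */
	vdev->config->reset(vdev);

	/* 2. reclaim buffers still sitting unused in the queue */
	while ((buf = virtqueue_detach_unused_buf(mv->vq)) != NULL)
		kfree(buf);

	/* 3. safe to tear down queues and free per-device state */
	vdev->config->del_vqs(vdev);
	kfree(mv);
}
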
diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c
index c1b03f4235b9..4e7fec36f5c3 100644
--- a/drivers/watchdog/sp805_wdt.c
+++ b/drivers/watchdog/sp805_wdt.c
@@ -4,7 +4,7 @@
4 * Watchdog driver for ARM SP805 watchdog module 4 * Watchdog driver for ARM SP805 watchdog module
5 * 5 *
6 * Copyright (C) 2010 ST Microelectronics 6 * Copyright (C) 2010 ST Microelectronics
7 * Viresh Kumar <viresh.linux@gmail.com> 7 * Viresh Kumar <vireshk@kernel.org>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2 or later. This program is licensed "as is" without any 10 * License version 2 or later. This program is licensed "as is" without any
@@ -303,6 +303,6 @@ static struct amba_driver sp805_wdt_driver = {
303 303
304module_amba_driver(sp805_wdt_driver); 304module_amba_driver(sp805_wdt_driver);
305 305
306MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>"); 306MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");
307MODULE_DESCRIPTION("ARM SP805 Watchdog Driver"); 307MODULE_DESCRIPTION("ARM SP805 Watchdog Driver");
308MODULE_LICENSE("GPL"); 308MODULE_LICENSE("GPL");
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index fd933695f232..bf4a23c7c591 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -472,7 +472,7 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
472} 472}
473 473
474/* 474/*
475 * We avoid multiple worker processes conflicting via the balloon mutex. 475 * As this is a work item it is guaranteed to run as a single instance only.
476 * We may of course race updates of the target counts (which are protected 476 * We may of course race updates of the target counts (which are protected
477 * by the balloon lock), or with changes to the Xen hard limit, but we will 477 * by the balloon lock), or with changes to the Xen hard limit, but we will
478 * recover from these in time. 478 * recover from these in time.
@@ -482,9 +482,10 @@ static void balloon_process(struct work_struct *work)
482 enum bp_state state = BP_DONE; 482 enum bp_state state = BP_DONE;
483 long credit; 483 long credit;
484 484
485 mutex_lock(&balloon_mutex);
486 485
487 do { 486 do {
487 mutex_lock(&balloon_mutex);
488
488 credit = current_credit(); 489 credit = current_credit();
489 490
490 if (credit > 0) { 491 if (credit > 0) {
@@ -499,17 +500,15 @@ static void balloon_process(struct work_struct *work)
499 500
500 state = update_schedule(state); 501 state = update_schedule(state);
501 502
502#ifndef CONFIG_PREEMPT 503 mutex_unlock(&balloon_mutex);
503 if (need_resched()) 504
504 schedule(); 505 cond_resched();
505#endif 506
506 } while (credit && state == BP_DONE); 507 } while (credit && state == BP_DONE);
507 508
508 /* Schedule more work if there is some still to be done. */ 509 /* Schedule more work if there is some still to be done. */
509 if (state == BP_EAGAIN) 510 if (state == BP_EAGAIN)
510 schedule_delayed_work(&balloon_worker, balloon_stats.schedule_delay * HZ); 511 schedule_delayed_work(&balloon_worker, balloon_stats.schedule_delay * HZ);
511
512 mutex_unlock(&balloon_mutex);
513} 512}
514 513
515/* Resets the Xen limit, sets new target, and kicks off processing. */ 514/* Resets the Xen limit, sets new target, and kicks off processing. */
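
The balloon change above takes the mutex per loop iteration and replaces the open-coded !CONFIG_PREEMPT reschedule with cond_resched(). A generic sketch of that long-running work-item shape (my_mutex, my_credit() and my_do_step() are hypothetical stand-ins for driver-specific state and helpers):

/*
 * Long-running work item that locks per iteration and yields between
 * iterations.  The my_* names are hypothetical placeholders.
 */
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/workqueue.h>

static DEFINE_MUTEX(my_mutex);
static long my_remaining = 3;

static long my_credit(void)
{
	return my_remaining;		/* how much work is left */
}

static bool my_do_step(void)
{
	my_remaining--;			/* pretend one bounded chunk was processed */
	return true;			/* keep going (BP_DONE analogue) */
}

static void my_worker(struct work_struct *work)
{
	long credit;
	bool keep_going = true;

	do {
		/* hold the lock only while touching shared state */
		mutex_lock(&my_mutex);
		credit = my_credit();
		if (credit > 0)
			keep_going = my_do_step();
		mutex_unlock(&my_mutex);

		/* yield even on non-preemptible kernels */
		cond_resched();
	} while (credit && keep_going);
}

static DECLARE_WORK(my_work, my_worker);
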
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 67b9163db718..0dbb222daaf1 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -568,12 +568,14 @@ static int gntdev_release(struct inode *inode, struct file *flip)
568 568
569 pr_debug("priv %p\n", priv); 569 pr_debug("priv %p\n", priv);
570 570
571 mutex_lock(&priv->lock);
571 while (!list_empty(&priv->maps)) { 572 while (!list_empty(&priv->maps)) {
572 map = list_entry(priv->maps.next, struct grant_map, next); 573 map = list_entry(priv->maps.next, struct grant_map, next);
573 list_del(&map->next); 574 list_del(&map->next);
574 gntdev_put_map(NULL /* already removed */, map); 575 gntdev_put_map(NULL /* already removed */, map);
575 } 576 }
576 WARN_ON(!list_empty(&priv->freeable_maps)); 577 WARN_ON(!list_empty(&priv->freeable_maps));
578 mutex_unlock(&priv->lock);
577 579
578 if (use_ptemod) 580 if (use_ptemod)
579 mmu_notifier_unregister(&priv->mn, priv->mm); 581 mmu_notifier_unregister(&priv->mn, priv->mm);
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index 9ad327238ba9..e30353575d5d 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -814,8 +814,10 @@ static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
814 814
815 rv = xenbus_unmap_ring(dev, node->handles, node->nr_handles, 815 rv = xenbus_unmap_ring(dev, node->handles, node->nr_handles,
816 addrs); 816 addrs);
817 if (!rv) 817 if (!rv) {
818 vunmap(vaddr); 818 vunmap(vaddr);
819 free_xenballooned_pages(node->nr_handles, node->hvm.pages);
820 }
819 else 821 else
820 WARN(1, "Leaking %p, size %u page(s)\n", vaddr, 822 WARN(1, "Leaking %p, size %u page(s)\n", vaddr,
821 node->nr_handles); 823 node->nr_handles);
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index 862fbc206755..564a7de17d99 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -378,7 +378,7 @@ int btrfs_dev_replace_start(struct btrfs_root *root,
378 378
379 ret = btrfs_kobj_add_device(tgt_device->fs_devices, tgt_device); 379 ret = btrfs_kobj_add_device(tgt_device->fs_devices, tgt_device);
380 if (ret) 380 if (ret)
381 btrfs_error(root->fs_info, ret, "kobj add dev failed"); 381 btrfs_err(root->fs_info, "kobj add dev failed %d\n", ret);
382 382
383 printk_in_rcu(KERN_INFO 383 printk_in_rcu(KERN_INFO
384 "BTRFS: dev_replace from %s (devid %llu) to %s started\n", 384 "BTRFS: dev_replace from %s (devid %llu) to %s started\n",
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index a9aadb2ad525..f556c3732c2c 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -2842,6 +2842,7 @@ int open_ctree(struct super_block *sb,
2842 !extent_buffer_uptodate(chunk_root->node)) { 2842 !extent_buffer_uptodate(chunk_root->node)) {
2843 printk(KERN_ERR "BTRFS: failed to read chunk root on %s\n", 2843 printk(KERN_ERR "BTRFS: failed to read chunk root on %s\n",
2844 sb->s_id); 2844 sb->s_id);
2845 chunk_root->node = NULL;
2845 goto fail_tree_roots; 2846 goto fail_tree_roots;
2846 } 2847 }
2847 btrfs_set_root_node(&chunk_root->root_item, chunk_root->node); 2848 btrfs_set_root_node(&chunk_root->root_item, chunk_root->node);
@@ -2879,7 +2880,7 @@ retry_root_backup:
2879 !extent_buffer_uptodate(tree_root->node)) { 2880 !extent_buffer_uptodate(tree_root->node)) {
2880 printk(KERN_WARNING "BTRFS: failed to read tree root on %s\n", 2881 printk(KERN_WARNING "BTRFS: failed to read tree root on %s\n",
2881 sb->s_id); 2882 sb->s_id);
2882 2883 tree_root->node = NULL;
2883 goto recovery_tree_root; 2884 goto recovery_tree_root;
2884 } 2885 }
2885 2886
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 1c2bd1723e40..07204bf601ed 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2296,9 +2296,22 @@ static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2296static inline struct btrfs_delayed_ref_node * 2296static inline struct btrfs_delayed_ref_node *
2297select_delayed_ref(struct btrfs_delayed_ref_head *head) 2297select_delayed_ref(struct btrfs_delayed_ref_head *head)
2298{ 2298{
2299 struct btrfs_delayed_ref_node *ref;
2300
2299 if (list_empty(&head->ref_list)) 2301 if (list_empty(&head->ref_list))
2300 return NULL; 2302 return NULL;
2301 2303
2304 /*
2305 * Select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
2306 * This is to prevent a ref count from going down to zero, which deletes
2307 * the extent item from the extent tree, when there still are references
2308 * to add, which would fail because they would not find the extent item.
2309 */
2310 list_for_each_entry(ref, &head->ref_list, list) {
2311 if (ref->action == BTRFS_ADD_DELAYED_REF)
2312 return ref;
2313 }
2314
2302 return list_entry(head->ref_list.next, struct btrfs_delayed_ref_node, 2315 return list_entry(head->ref_list.next, struct btrfs_delayed_ref_node,
2303 list); 2316 list);
2304} 2317}
@@ -4214,6 +4227,24 @@ out:
4214 space_info->chunk_alloc = 0; 4227 space_info->chunk_alloc = 0;
4215 spin_unlock(&space_info->lock); 4228 spin_unlock(&space_info->lock);
4216 mutex_unlock(&fs_info->chunk_mutex); 4229 mutex_unlock(&fs_info->chunk_mutex);
4230 /*
4231 * When we allocate a new chunk we reserve space in the chunk block
4232 * reserve to make sure we can COW nodes/leafs in the chunk tree or
4233 * add new nodes/leafs to it if we end up needing to do it when
4234 * inserting the chunk item and updating device items as part of the
4235 * second phase of chunk allocation, performed by
4236 * btrfs_finish_chunk_alloc(). So make sure we don't accumulate a
4237 * large number of new block groups to create in our transaction
4238 * handle's new_bgs list to avoid exhausting the chunk block reserve
4239 * in extreme cases - like having a single transaction create many new
4240 * block groups when starting to write out the free space caches of all
4241 * the block groups that were made dirty during the lifetime of the
4242 * transaction.
4243 */
4244 if (trans->chunk_bytes_reserved >= (2 * 1024 * 1024ull)) {
4245 btrfs_create_pending_block_groups(trans, trans->root);
4246 btrfs_trans_release_chunk_metadata(trans);
4247 }
4217 return ret; 4248 return ret;
4218} 4249}
4219 4250
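
The select_delayed_ref() change above scans the list for an ADD entry before falling back to plain queue order, so a reference count never transiently drops to zero. The same preference scan on a generic list (struct my_ref and its action values are hypothetical):

/*
 * Prefer one kind of entry when picking from a list, as select_delayed_ref()
 * does above.  struct my_ref and MY_REF_ADD/MY_REF_DROP are hypothetical.
 */
#include <linux/kernel.h>
#include <linux/list.h>

enum { MY_REF_ADD, MY_REF_DROP };

struct my_ref {
	struct list_head list;
	int action;
};

static struct my_ref *select_ref(struct list_head *head)
{
	struct my_ref *ref;

	if (list_empty(head))
		return NULL;

	/* prefer ADD entries so the count cannot transiently reach zero */
	list_for_each_entry(ref, head, list) {
		if (ref->action == MY_REF_ADD)
			return ref;
	}

	/* no ADD pending: fall back to plain queue order */
	return list_first_entry(head, struct my_ref, list);
}
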
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index b33c0cf02668..e33dff356460 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -4209,7 +4209,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
4209 u64 extent_num_bytes = 0; 4209 u64 extent_num_bytes = 0;
4210 u64 extent_offset = 0; 4210 u64 extent_offset = 0;
4211 u64 item_end = 0; 4211 u64 item_end = 0;
4212 u64 last_size = (u64)-1; 4212 u64 last_size = new_size;
4213 u32 found_type = (u8)-1; 4213 u32 found_type = (u8)-1;
4214 int found_extent; 4214 int found_extent;
4215 int del_item; 4215 int del_item;
@@ -4493,8 +4493,7 @@ out:
4493 btrfs_abort_transaction(trans, root, ret); 4493 btrfs_abort_transaction(trans, root, ret);
4494 } 4494 }
4495error: 4495error:
4496 if (last_size != (u64)-1 && 4496 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
4497 root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
4498 btrfs_ordered_update_i_size(inode, last_size, NULL); 4497 btrfs_ordered_update_i_size(inode, last_size, NULL);
4499 4498
4500 btrfs_free_path(path); 4499 btrfs_free_path(path);
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 5d91776e12a2..0770c91586ca 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -3090,7 +3090,7 @@ out_unlock:
3090static long btrfs_ioctl_file_extent_same(struct file *file, 3090static long btrfs_ioctl_file_extent_same(struct file *file,
3091 struct btrfs_ioctl_same_args __user *argp) 3091 struct btrfs_ioctl_same_args __user *argp)
3092{ 3092{
3093 struct btrfs_ioctl_same_args *same; 3093 struct btrfs_ioctl_same_args *same = NULL;
3094 struct btrfs_ioctl_same_extent_info *info; 3094 struct btrfs_ioctl_same_extent_info *info;
3095 struct inode *src = file_inode(file); 3095 struct inode *src = file_inode(file);
3096 u64 off; 3096 u64 off;
@@ -3120,6 +3120,7 @@ static long btrfs_ioctl_file_extent_same(struct file *file,
3120 3120
3121 if (IS_ERR(same)) { 3121 if (IS_ERR(same)) {
3122 ret = PTR_ERR(same); 3122 ret = PTR_ERR(same);
3123 same = NULL;
3123 goto out; 3124 goto out;
3124 } 3125 }
3125 3126
@@ -3190,6 +3191,7 @@ static long btrfs_ioctl_file_extent_same(struct file *file,
3190 3191
3191out: 3192out:
3192 mnt_drop_write_file(file); 3193 mnt_drop_write_file(file);
3194 kfree(same);
3193 return ret; 3195 return ret;
3194} 3196}
3195 3197
@@ -3586,6 +3588,20 @@ process_slot:
3586 u64 trim = 0; 3588 u64 trim = 0;
3587 u64 aligned_end = 0; 3589 u64 aligned_end = 0;
3588 3590
3591 /*
3592 * Don't copy an inline extent into an offset
3593 * greater than zero. Having an inline extent
3594 * at such an offset results in chaos as btrfs
3595 * isn't prepared for such cases. Just skip
3596 * this case for the same reasons as commented
3597 * at btrfs_ioctl_clone().
3598 */
3599 if (last_dest_end > 0) {
3600 ret = -EOPNOTSUPP;
3601 btrfs_end_transaction(trans, root);
3602 goto out;
3603 }
3604
3589 if (off > key.offset) { 3605 if (off > key.offset) {
3590 skip = off - key.offset; 3606 skip = off - key.offset;
3591 new_key.offset += skip; 3607 new_key.offset += skip;
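
The extent-same change above initializes the pointer to NULL, clears it again when the user copy fails, and frees it once at the common exit, relying on kfree(NULL) being a no-op. The same single-exit shape with hypothetical names (my_args, my_handle_args(), my_ioctl()):

/*
 * Single-exit cleanup shape: the pointer starts NULL, is reset to NULL when
 * memdup_user() fails, and is unconditionally kfree()d at the exit label
 * (kfree(NULL) is a no-op).
 */
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct my_args {
	u64 offset;
	u64 length;
};

static long my_handle_args(const struct my_args *args)
{
	return args->length ? 0 : -EINVAL;
}

static long my_ioctl(void __user *argp)
{
	struct my_args *args = NULL;
	long ret;

	args = memdup_user(argp, sizeof(*args));
	if (IS_ERR(args)) {
		ret = PTR_ERR(args);
		args = NULL;			/* makes the kfree() below harmless */
		goto out;
	}

	ret = my_handle_args(args);
out:
	kfree(args);
	return ret;
}
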
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index e9ace099162c..8a8202956576 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -1651,6 +1651,11 @@ static int qgroup_update_counters(struct btrfs_fs_info *fs_info,
1651 /* Exclusive -> exclusive, nothing changed */ 1651 /* Exclusive -> exclusive, nothing changed */
1652 } 1652 }
1653 } 1653 }
1654
1655 /* For exclusive extent, free its reserved bytes too */
1656 if (nr_old_roots == 0 && nr_new_roots == 1 &&
1657 cur_new_count == nr_new_roots)
1658 qg->reserved -= num_bytes;
1654 if (dirty) 1659 if (dirty)
1655 qgroup_dirty(fs_info, qg); 1660 qgroup_dirty(fs_info, qg);
1656 } 1661 }
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index c0f18e7266b6..f5021fcb154e 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -761,7 +761,7 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
761 761
762 if (!list_empty(&trans->ordered)) { 762 if (!list_empty(&trans->ordered)) {
763 spin_lock(&info->trans_lock); 763 spin_lock(&info->trans_lock);
764 list_splice(&trans->ordered, &cur_trans->pending_ordered); 764 list_splice_init(&trans->ordered, &cur_trans->pending_ordered);
765 spin_unlock(&info->trans_lock); 765 spin_unlock(&info->trans_lock);
766 } 766 }
767 767
@@ -1866,7 +1866,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1866 } 1866 }
1867 1867
1868 spin_lock(&root->fs_info->trans_lock); 1868 spin_lock(&root->fs_info->trans_lock);
1869 list_splice(&trans->ordered, &cur_trans->pending_ordered); 1869 list_splice_init(&trans->ordered, &cur_trans->pending_ordered);
1870 if (cur_trans->state >= TRANS_STATE_COMMIT_START) { 1870 if (cur_trans->state >= TRANS_STATE_COMMIT_START) {
1871 spin_unlock(&root->fs_info->trans_lock); 1871 spin_unlock(&root->fs_info->trans_lock);
1872 atomic_inc(&cur_trans->use_count); 1872 atomic_inc(&cur_trans->use_count);
@@ -2152,7 +2152,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
2152 2152
2153 kmem_cache_free(btrfs_trans_handle_cachep, trans); 2153 kmem_cache_free(btrfs_trans_handle_cachep, trans);
2154 2154
2155 if (current != root->fs_info->transaction_kthread) 2155 if (current != root->fs_info->transaction_kthread &&
2156 current != root->fs_info->cleaner_kthread)
2156 btrfs_run_delayed_iputs(root); 2157 btrfs_run_delayed_iputs(root);
2157 2158
2158 return ret; 2159 return ret;
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index dc10c9dd36c1..ddd5e9471290 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1506,7 +1506,6 @@ static int __mark_caps_flushing(struct inode *inode,
1506 1506
1507 swap(cf, ci->i_prealloc_cap_flush); 1507 swap(cf, ci->i_prealloc_cap_flush);
1508 cf->caps = flushing; 1508 cf->caps = flushing;
1509 cf->kick = false;
1510 1509
1511 spin_lock(&mdsc->cap_dirty_lock); 1510 spin_lock(&mdsc->cap_dirty_lock);
1512 list_del_init(&ci->i_dirty_item); 1511 list_del_init(&ci->i_dirty_item);
@@ -2123,8 +2122,7 @@ static void kick_flushing_capsnaps(struct ceph_mds_client *mdsc,
2123 2122
2124static int __kick_flushing_caps(struct ceph_mds_client *mdsc, 2123static int __kick_flushing_caps(struct ceph_mds_client *mdsc,
2125 struct ceph_mds_session *session, 2124 struct ceph_mds_session *session,
2126 struct ceph_inode_info *ci, 2125 struct ceph_inode_info *ci)
2127 bool kick_all)
2128{ 2126{
2129 struct inode *inode = &ci->vfs_inode; 2127 struct inode *inode = &ci->vfs_inode;
2130 struct ceph_cap *cap; 2128 struct ceph_cap *cap;
@@ -2150,9 +2148,7 @@ static int __kick_flushing_caps(struct ceph_mds_client *mdsc,
2150 2148
2151 for (n = rb_first(&ci->i_cap_flush_tree); n; n = rb_next(n)) { 2149 for (n = rb_first(&ci->i_cap_flush_tree); n; n = rb_next(n)) {
2152 cf = rb_entry(n, struct ceph_cap_flush, i_node); 2150 cf = rb_entry(n, struct ceph_cap_flush, i_node);
2153 if (cf->tid < first_tid) 2151 if (cf->tid >= first_tid)
2154 continue;
2155 if (kick_all || cf->kick)
2156 break; 2152 break;
2157 } 2153 }
2158 if (!n) { 2154 if (!n) {
@@ -2161,7 +2157,6 @@ static int __kick_flushing_caps(struct ceph_mds_client *mdsc,
2161 } 2157 }
2162 2158
2163 cf = rb_entry(n, struct ceph_cap_flush, i_node); 2159 cf = rb_entry(n, struct ceph_cap_flush, i_node);
2164 cf->kick = false;
2165 2160
2166 first_tid = cf->tid + 1; 2161 first_tid = cf->tid + 1;
2167 2162
@@ -2181,8 +2176,6 @@ void ceph_early_kick_flushing_caps(struct ceph_mds_client *mdsc,
2181{ 2176{
2182 struct ceph_inode_info *ci; 2177 struct ceph_inode_info *ci;
2183 struct ceph_cap *cap; 2178 struct ceph_cap *cap;
2184 struct ceph_cap_flush *cf;
2185 struct rb_node *n;
2186 2179
2187 dout("early_kick_flushing_caps mds%d\n", session->s_mds); 2180 dout("early_kick_flushing_caps mds%d\n", session->s_mds);
2188 list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) { 2181 list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) {
@@ -2205,16 +2198,11 @@ void ceph_early_kick_flushing_caps(struct ceph_mds_client *mdsc,
2205 if ((cap->issued & ci->i_flushing_caps) != 2198 if ((cap->issued & ci->i_flushing_caps) !=
2206 ci->i_flushing_caps) { 2199 ci->i_flushing_caps) {
2207 spin_unlock(&ci->i_ceph_lock); 2200 spin_unlock(&ci->i_ceph_lock);
2208 if (!__kick_flushing_caps(mdsc, session, ci, true)) 2201 if (!__kick_flushing_caps(mdsc, session, ci))
2209 continue; 2202 continue;
2210 spin_lock(&ci->i_ceph_lock); 2203 spin_lock(&ci->i_ceph_lock);
2211 } 2204 }
2212 2205
2213 for (n = rb_first(&ci->i_cap_flush_tree); n; n = rb_next(n)) {
2214 cf = rb_entry(n, struct ceph_cap_flush, i_node);
2215 cf->kick = true;
2216 }
2217
2218 spin_unlock(&ci->i_ceph_lock); 2206 spin_unlock(&ci->i_ceph_lock);
2219 } 2207 }
2220} 2208}
@@ -2228,7 +2216,7 @@ void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
2228 2216
2229 dout("kick_flushing_caps mds%d\n", session->s_mds); 2217 dout("kick_flushing_caps mds%d\n", session->s_mds);
2230 list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) { 2218 list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) {
2231 int delayed = __kick_flushing_caps(mdsc, session, ci, false); 2219 int delayed = __kick_flushing_caps(mdsc, session, ci);
2232 if (delayed) { 2220 if (delayed) {
2233 spin_lock(&ci->i_ceph_lock); 2221 spin_lock(&ci->i_ceph_lock);
2234 __cap_delay_requeue(mdsc, ci); 2222 __cap_delay_requeue(mdsc, ci);
@@ -2261,7 +2249,7 @@ static void kick_flushing_inode_caps(struct ceph_mds_client *mdsc,
2261 2249
2262 spin_unlock(&ci->i_ceph_lock); 2250 spin_unlock(&ci->i_ceph_lock);
2263 2251
2264 delayed = __kick_flushing_caps(mdsc, session, ci, true); 2252 delayed = __kick_flushing_caps(mdsc, session, ci);
2265 if (delayed) { 2253 if (delayed) {
2266 spin_lock(&ci->i_ceph_lock); 2254 spin_lock(&ci->i_ceph_lock);
2267 __cap_delay_requeue(mdsc, ci); 2255 __cap_delay_requeue(mdsc, ci);
diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c
index 4347039ecc18..6706bde9ad1b 100644
--- a/fs/ceph/locks.c
+++ b/fs/ceph/locks.c
@@ -287,7 +287,7 @@ int ceph_encode_locks_to_buffer(struct inode *inode,
287 return 0; 287 return 0;
288 288
289 spin_lock(&ctx->flc_lock); 289 spin_lock(&ctx->flc_lock);
290 list_for_each_entry(lock, &ctx->flc_flock, fl_list) { 290 list_for_each_entry(lock, &ctx->flc_posix, fl_list) {
291 ++seen_fcntl; 291 ++seen_fcntl;
292 if (seen_fcntl > num_fcntl_locks) { 292 if (seen_fcntl > num_fcntl_locks) {
293 err = -ENOSPC; 293 err = -ENOSPC;
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 860cc016e70d..2f2460d23a06 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -189,7 +189,6 @@ static inline void ceph_put_cap_snap(struct ceph_cap_snap *capsnap)
189struct ceph_cap_flush { 189struct ceph_cap_flush {
190 u64 tid; 190 u64 tid;
191 int caps; 191 int caps;
192 bool kick;
193 struct rb_node g_node; // global 192 struct rb_node g_node; // global
194 union { 193 union {
195 struct rb_node i_node; // inode 194 struct rb_node i_node; // inode
diff --git a/fs/configfs/item.c b/fs/configfs/item.c
index 4d6a30e76168..b863a09cd2f1 100644
--- a/fs/configfs/item.c
+++ b/fs/configfs/item.c
@@ -115,7 +115,7 @@ void config_item_init_type_name(struct config_item *item,
115 const char *name, 115 const char *name,
116 struct config_item_type *type) 116 struct config_item_type *type)
117{ 117{
118 config_item_set_name(item, name); 118 config_item_set_name(item, "%s", name);
119 item->ci_type = type; 119 item->ci_type = type;
120 config_item_init(item); 120 config_item_init(item);
121} 121}
@@ -124,7 +124,7 @@ EXPORT_SYMBOL(config_item_init_type_name);
124void config_group_init_type_name(struct config_group *group, const char *name, 124void config_group_init_type_name(struct config_group *group, const char *name,
125 struct config_item_type *type) 125 struct config_item_type *type)
126{ 126{
127 config_item_set_name(&group->cg_item, name); 127 config_item_set_name(&group->cg_item, "%s", name);
128 group->cg_item.ci_type = type; 128 group->cg_item.ci_type = type;
129 config_group_init(group); 129 config_group_init(group);
130} 130}
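
The configfs change above stops passing a caller-supplied name directly as the printf-style format and routes it through a fixed "%s" instead. The same pitfall in a small userspace sketch (set_label() is a hypothetical printf-style helper):

/*
 * Format-string pitfall: an externally supplied string must be format *data*
 * behind "%s", never the format itself.
 */
#include <stdarg.h>
#include <stdio.h>

static void set_label(char *dst, size_t len, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vsnprintf(dst, len, fmt, ap);
	va_end(ap);
}

int main(void)
{
	char label[64];
	const char *untrusted = "100% legit";	/* note the '%' */

	/* wrong: set_label(label, sizeof(label), untrusted);  the '%' is interpreted */
	set_label(label, sizeof(label), "%s", untrusted);	/* right */

	printf("%s\n", label);
	return 0;
}
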
diff --git a/fs/dax.c b/fs/dax.c
index c3e21ccfc358..a7f77e1fa18c 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -319,6 +319,12 @@ static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
319 * @vma: The virtual memory area where the fault occurred 319 * @vma: The virtual memory area where the fault occurred
320 * @vmf: The description of the fault 320 * @vmf: The description of the fault
321 * @get_block: The filesystem method used to translate file offsets to blocks 321 * @get_block: The filesystem method used to translate file offsets to blocks
322 * @complete_unwritten: The filesystem method used to convert unwritten blocks
 323 *   to written so the data written to them is exposed. This is required
 324 *   by write faults for filesystems that will return unwritten extent
 325 *   mappings from @get_block, but it is optional for reads as
 326 *   dax_insert_mapping() will always zero unwritten blocks. If the fs does
 327 *   not support unwritten extents, then it should pass NULL.
322 * 328 *
323 * When a page fault occurs, filesystems may call this helper in their 329 * When a page fault occurs, filesystems may call this helper in their
324 * fault handler for DAX files. __dax_fault() assumes the caller has done all 330 * fault handler for DAX files. __dax_fault() assumes the caller has done all
@@ -437,8 +443,12 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
437 * as for normal BH based IO completions. 443 * as for normal BH based IO completions.
438 */ 444 */
439 error = dax_insert_mapping(inode, &bh, vma, vmf); 445 error = dax_insert_mapping(inode, &bh, vma, vmf);
440 if (buffer_unwritten(&bh)) 446 if (buffer_unwritten(&bh)) {
441 complete_unwritten(&bh, !error); 447 if (complete_unwritten)
448 complete_unwritten(&bh, !error);
449 else
450 WARN_ON_ONCE(!(vmf->flags & FAULT_FLAG_WRITE));
451 }
442 452
443 out: 453 out:
444 if (error == -ENOMEM) 454 if (error == -ENOMEM)
diff --git a/fs/dcache.c b/fs/dcache.c
index 5c8ea15e73a5..9b5fe503f6cb 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -3442,22 +3442,15 @@ void __init vfs_caches_init_early(void)
3442 inode_init_early(); 3442 inode_init_early();
3443} 3443}
3444 3444
3445void __init vfs_caches_init(unsigned long mempages) 3445void __init vfs_caches_init(void)
3446{ 3446{
3447 unsigned long reserve;
3448
3449 /* Base hash sizes on available memory, with a reserve equal to
3450 150% of current kernel size */
3451
3452 reserve = min((mempages - nr_free_pages()) * 3/2, mempages - 1);
3453 mempages -= reserve;
3454
3455 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0, 3447 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
3456 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); 3448 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
3457 3449
3458 dcache_init(); 3450 dcache_init();
3459 inode_init(); 3451 inode_init();
3460 files_init(mempages); 3452 files_init();
3453 files_maxfiles_init();
3461 mnt_init(); 3454 mnt_init();
3462 bdev_cache_init(); 3455 bdev_cache_init();
3463 chrdev_init(); 3456 chrdev_init();
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 9bedfa8dd3a5..f71e19a9dd3c 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -2072,8 +2072,6 @@ static int f2fs_set_data_page_dirty(struct page *page)
2072 return 1; 2072 return 1;
2073 } 2073 }
2074 2074
2075 mark_inode_dirty(inode);
2076
2077 if (!PageDirty(page)) { 2075 if (!PageDirty(page)) {
2078 __set_page_dirty_nobuffers(page); 2076 __set_page_dirty_nobuffers(page);
2079 update_dirty_page(inode, page); 2077 update_dirty_page(inode, page);
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index ada2a3dd701a..b0f38c3b37f4 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -1331,12 +1331,13 @@ static int f2fs_ioc_commit_atomic_write(struct file *filp)
1331 if (ret) 1331 if (ret)
1332 return ret; 1332 return ret;
1333 1333
1334 if (f2fs_is_atomic_file(inode)) 1334 if (f2fs_is_atomic_file(inode)) {
1335 clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
1335 commit_inmem_pages(inode, false); 1336 commit_inmem_pages(inode, false);
1337 }
1336 1338
1337 ret = f2fs_sync_file(filp, 0, LONG_MAX, 0); 1339 ret = f2fs_sync_file(filp, 0, LONG_MAX, 0);
1338 mnt_drop_write_file(filp); 1340 mnt_drop_write_file(filp);
1339 clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
1340 return ret; 1341 return ret;
1341} 1342}
1342 1343
@@ -1387,8 +1388,8 @@ static int f2fs_ioc_abort_volatile_write(struct file *filp)
1387 f2fs_balance_fs(F2FS_I_SB(inode)); 1388 f2fs_balance_fs(F2FS_I_SB(inode));
1388 1389
1389 if (f2fs_is_atomic_file(inode)) { 1390 if (f2fs_is_atomic_file(inode)) {
1390 commit_inmem_pages(inode, false);
1391 clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE); 1391 clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
1392 commit_inmem_pages(inode, false);
1392 } 1393 }
1393 1394
1394 if (f2fs_is_volatile_file(inode)) 1395 if (f2fs_is_volatile_file(inode))
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index e1e73617d13b..22fb5ef37966 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -556,27 +556,39 @@ static void move_encrypted_block(struct inode *inode, block_t bidx)
556 if (!fio.encrypted_page) 556 if (!fio.encrypted_page)
557 goto put_out; 557 goto put_out;
558 558
559 f2fs_submit_page_bio(&fio); 559 err = f2fs_submit_page_bio(&fio);
560 if (err)
561 goto put_page_out;
562
563 /* write page */
564 lock_page(fio.encrypted_page);
565
566 if (unlikely(!PageUptodate(fio.encrypted_page)))
567 goto put_page_out;
568 if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi)))
569 goto put_page_out;
570
571 set_page_dirty(fio.encrypted_page);
572 f2fs_wait_on_page_writeback(fio.encrypted_page, META);
573 if (clear_page_dirty_for_io(fio.encrypted_page))
574 dec_page_count(fio.sbi, F2FS_DIRTY_META);
575
576 set_page_writeback(fio.encrypted_page);
560 577
561 /* allocate block address */ 578 /* allocate block address */
562 f2fs_wait_on_page_writeback(dn.node_page, NODE); 579 f2fs_wait_on_page_writeback(dn.node_page, NODE);
563
564 allocate_data_block(fio.sbi, NULL, fio.blk_addr, 580 allocate_data_block(fio.sbi, NULL, fio.blk_addr,
565 &fio.blk_addr, &sum, CURSEG_COLD_DATA); 581 &fio.blk_addr, &sum, CURSEG_COLD_DATA);
566 dn.data_blkaddr = fio.blk_addr;
567
568 /* write page */
569 lock_page(fio.encrypted_page);
570 set_page_writeback(fio.encrypted_page);
571 fio.rw = WRITE_SYNC; 582 fio.rw = WRITE_SYNC;
572 f2fs_submit_page_mbio(&fio); 583 f2fs_submit_page_mbio(&fio);
573 584
585 dn.data_blkaddr = fio.blk_addr;
574 set_data_blkaddr(&dn); 586 set_data_blkaddr(&dn);
575 f2fs_update_extent_cache(&dn); 587 f2fs_update_extent_cache(&dn);
576 set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE); 588 set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
577 if (page->index == 0) 589 if (page->index == 0)
578 set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN); 590 set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
579 591put_page_out:
580 f2fs_put_page(fio.encrypted_page, 1); 592 f2fs_put_page(fio.encrypted_page, 1);
581put_out: 593put_out:
582 f2fs_put_dnode(&dn); 594 f2fs_put_dnode(&dn);
@@ -605,8 +617,8 @@ static void move_data_page(struct inode *inode, block_t bidx, int gc_type)
605 .page = page, 617 .page = page,
606 .encrypted_page = NULL, 618 .encrypted_page = NULL,
607 }; 619 };
620 set_page_dirty(page);
608 f2fs_wait_on_page_writeback(page, DATA); 621 f2fs_wait_on_page_writeback(page, DATA);
609
610 if (clear_page_dirty_for_io(page)) 622 if (clear_page_dirty_for_io(page))
611 inode_dec_dirty_pages(inode); 623 inode_dec_dirty_pages(inode);
612 set_cold_data(page); 624 set_cold_data(page);
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index 38e75fb1e488..a13ffcc32992 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -141,6 +141,8 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
141 kunmap_atomic(dst_addr); 141 kunmap_atomic(dst_addr);
142 SetPageUptodate(page); 142 SetPageUptodate(page);
143no_update: 143no_update:
144 set_page_dirty(page);
145
144 /* clear dirty state */ 146 /* clear dirty state */
145 dirty = clear_page_dirty_for_io(page); 147 dirty = clear_page_dirty_for_io(page);
146 148
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 1eb343768781..61b97f9cb9f6 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -257,6 +257,7 @@ void commit_inmem_pages(struct inode *inode, bool abort)
257 if (!abort) { 257 if (!abort) {
258 lock_page(cur->page); 258 lock_page(cur->page);
259 if (cur->page->mapping == inode->i_mapping) { 259 if (cur->page->mapping == inode->i_mapping) {
260 set_page_dirty(cur->page);
260 f2fs_wait_on_page_writeback(cur->page, DATA); 261 f2fs_wait_on_page_writeback(cur->page, DATA);
261 if (clear_page_dirty_for_io(cur->page)) 262 if (clear_page_dirty_for_io(cur->page))
262 inode_dec_dirty_pages(inode); 263 inode_dec_dirty_pages(inode);
diff --git a/fs/file_table.c b/fs/file_table.c
index 7f9d407c7595..ad17e05ebf95 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -25,6 +25,7 @@
25#include <linux/hardirq.h> 25#include <linux/hardirq.h>
26#include <linux/task_work.h> 26#include <linux/task_work.h>
27#include <linux/ima.h> 27#include <linux/ima.h>
28#include <linux/swap.h>
28 29
29#include <linux/atomic.h> 30#include <linux/atomic.h>
30 31
@@ -308,19 +309,24 @@ void put_filp(struct file *file)
308 } 309 }
309} 310}
310 311
311void __init files_init(unsigned long mempages) 312void __init files_init(void)
312{ 313{
313 unsigned long n;
314
315 filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0, 314 filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
316 SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL); 315 SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
316 percpu_counter_init(&nr_files, 0, GFP_KERNEL);
317}
317 318
318 /* 319/*
319 * One file with associated inode and dcache is very roughly 1K. 320 * One file with associated inode and dcache is very roughly 1K. Per default
320 * Per default don't use more than 10% of our memory for files. 321 * do not use more than 10% of our memory for files.
321 */ 322 */
323void __init files_maxfiles_init(void)
324{
325 unsigned long n;
326 unsigned long memreserve = (totalram_pages - nr_free_pages()) * 3/2;
327
328 memreserve = min(memreserve, totalram_pages - 1);
329 n = ((totalram_pages - memreserve) * (PAGE_SIZE / 1024)) / 10;
322 330
323 n = (mempages * (PAGE_SIZE / 1024)) / 10;
324 files_stat.max_files = max_t(unsigned long, n, NR_FILE); 331 files_stat.max_files = max_t(unsigned long, n, NR_FILE);
325 percpu_counter_init(&nr_files, 0, GFP_KERNEL);
326} 332}
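
files_maxfiles_init() above sizes max_files from total memory minus a 150% reserve of what the kernel already uses, at roughly 1 KiB per file and capped at 10% of what remains. The arithmetic, as a plain C example with made-up page counts:

/*
 * Worked example of the files_maxfiles_init() sizing, with made-up page
 * counts so the arithmetic is easy to follow.
 */
#include <stdio.h>

int main(void)
{
	unsigned long page_size_kb = 4;			/* PAGE_SIZE / 1024 */
	unsigned long totalram_pages = 1UL << 20;	/* 4 GiB of 4 KiB pages */
	unsigned long nr_free_pages = 900000;		/* example value */
	unsigned long memreserve, n;

	/* reserve 150% of what is already in use ... */
	memreserve = (totalram_pages - nr_free_pages) * 3 / 2;
	/* ... but never the whole of memory */
	if (memreserve > totalram_pages - 1)
		memreserve = totalram_pages - 1;

	/* ~1 KiB per open file, limited to 10% of the remaining pages */
	n = ((totalram_pages - memreserve) * page_size_kb) / 10;

	printf("max_files ~= %lu\n", n);	/* ~330284 with these numbers */
	return 0;
}
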
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index f0520bcf2094..518c6294bf6c 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -702,6 +702,7 @@ void wbc_account_io(struct writeback_control *wbc, struct page *page,
702 else 702 else
703 wbc->wb_tcand_bytes -= min(bytes, wbc->wb_tcand_bytes); 703 wbc->wb_tcand_bytes -= min(bytes, wbc->wb_tcand_bytes);
704} 704}
705EXPORT_SYMBOL_GPL(wbc_account_io);
705 706
706/** 707/**
707 * inode_congested - test whether an inode is congested 708 * inode_congested - test whether an inode is congested
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 80cc1b35d460..ebb5e37455a0 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -2246,7 +2246,15 @@ static long fuse_dev_ioctl(struct file *file, unsigned int cmd,
2246 2246
2247 err = -EINVAL; 2247 err = -EINVAL;
2248 if (old) { 2248 if (old) {
2249 struct fuse_dev *fud = fuse_get_dev(old); 2249 struct fuse_dev *fud = NULL;
2250
2251 /*
2252 * Check against file->f_op because CUSE
2253 * uses the same ioctl handler.
2254 */
2255 if (old->f_op == file->f_op &&
2256 old->f_cred->user_ns == file->f_cred->user_ns)
2257 fud = fuse_get_dev(old);
2250 2258
2251 if (fud) { 2259 if (fud) {
2252 mutex_lock(&fuse_mutex); 2260 mutex_lock(&fuse_mutex);
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 0cf74df68617..973c24ce59ad 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -1010,6 +1010,8 @@ struct file *hugetlb_file_setup(const char *name, size_t size,
1010 inode = hugetlbfs_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0); 1010 inode = hugetlbfs_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0);
1011 if (!inode) 1011 if (!inode)
1012 goto out_dentry; 1012 goto out_dentry;
1013 if (creat_flags == HUGETLB_SHMFS_INODE)
1014 inode->i_flags |= S_PRIVATE;
1013 1015
1014 file = ERR_PTR(-ENOMEM); 1016 file = ERR_PTR(-ENOMEM);
1015 if (hugetlb_reserve_pages(inode, 0, 1017 if (hugetlb_reserve_pages(inode, 0,
diff --git a/fs/jfs/file.c b/fs/jfs/file.c
index e98d39d75cf4..b9dc23cd04f2 100644
--- a/fs/jfs/file.c
+++ b/fs/jfs/file.c
@@ -76,7 +76,7 @@ static int jfs_open(struct inode *inode, struct file *file)
76 if (ji->active_ag == -1) { 76 if (ji->active_ag == -1) {
77 struct jfs_sb_info *jfs_sb = JFS_SBI(inode->i_sb); 77 struct jfs_sb_info *jfs_sb = JFS_SBI(inode->i_sb);
78 ji->active_ag = BLKTOAG(addressPXD(&ji->ixpxd), jfs_sb); 78 ji->active_ag = BLKTOAG(addressPXD(&ji->ixpxd), jfs_sb);
79 atomic_inc( &jfs_sb->bmap->db_active[ji->active_ag]); 79 atomic_inc(&jfs_sb->bmap->db_active[ji->active_ag]);
80 } 80 }
81 spin_unlock_irq(&ji->ag_lock); 81 spin_unlock_irq(&ji->ag_lock);
82 } 82 }
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index 6f1cb2b5ee28..41aa3ca6a6a4 100644
--- a/fs/jfs/inode.c
+++ b/fs/jfs/inode.c
@@ -134,11 +134,11 @@ int jfs_write_inode(struct inode *inode, struct writeback_control *wbc)
134 * It has been committed since the last change, but was still 134 * It has been committed since the last change, but was still
135 * on the dirty inode list. 135 * on the dirty inode list.
136 */ 136 */
137 if (!test_cflag(COMMIT_Dirty, inode)) { 137 if (!test_cflag(COMMIT_Dirty, inode)) {
138 /* Make sure committed changes hit the disk */ 138 /* Make sure committed changes hit the disk */
139 jfs_flush_journal(JFS_SBI(inode->i_sb)->log, wait); 139 jfs_flush_journal(JFS_SBI(inode->i_sb)->log, wait);
140 return 0; 140 return 0;
141 } 141 }
142 142
143 if (jfs_commit_inode(inode, wait)) { 143 if (jfs_commit_inode(inode, wait)) {
144 jfs_err("jfs_write_inode: jfs_commit_inode failed!"); 144 jfs_err("jfs_write_inode: jfs_commit_inode failed!");
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index e33be921aa41..a5ac97b9a933 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -1160,7 +1160,7 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
1160 rc = dtModify(tid, new_dir, &new_dname, &ino, 1160 rc = dtModify(tid, new_dir, &new_dname, &ino,
1161 old_ip->i_ino, JFS_RENAME); 1161 old_ip->i_ino, JFS_RENAME);
1162 if (rc) 1162 if (rc)
1163 goto out4; 1163 goto out_tx;
1164 drop_nlink(new_ip); 1164 drop_nlink(new_ip);
1165 if (S_ISDIR(new_ip->i_mode)) { 1165 if (S_ISDIR(new_ip->i_mode)) {
1166 drop_nlink(new_ip); 1166 drop_nlink(new_ip);
@@ -1185,7 +1185,7 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
1185 if ((new_size = commitZeroLink(tid, new_ip)) < 0) { 1185 if ((new_size = commitZeroLink(tid, new_ip)) < 0) {
1186 txAbort(tid, 1); /* Marks FS Dirty */ 1186 txAbort(tid, 1); /* Marks FS Dirty */
1187 rc = new_size; 1187 rc = new_size;
1188 goto out4; 1188 goto out_tx;
1189 } 1189 }
1190 tblk = tid_to_tblock(tid); 1190 tblk = tid_to_tblock(tid);
1191 tblk->xflag |= COMMIT_DELETE; 1191 tblk->xflag |= COMMIT_DELETE;
@@ -1203,7 +1203,7 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
1203 if (rc) { 1203 if (rc) {
1204 jfs_err("jfs_rename didn't expect dtSearch to fail " 1204 jfs_err("jfs_rename didn't expect dtSearch to fail "
1205 "w/rc = %d", rc); 1205 "w/rc = %d", rc);
1206 goto out4; 1206 goto out_tx;
1207 } 1207 }
1208 1208
1209 ino = old_ip->i_ino; 1209 ino = old_ip->i_ino;
@@ -1211,7 +1211,7 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
1211 if (rc) { 1211 if (rc) {
1212 if (rc == -EIO) 1212 if (rc == -EIO)
1213 jfs_err("jfs_rename: dtInsert returned -EIO"); 1213 jfs_err("jfs_rename: dtInsert returned -EIO");
1214 goto out4; 1214 goto out_tx;
1215 } 1215 }
1216 if (S_ISDIR(old_ip->i_mode)) 1216 if (S_ISDIR(old_ip->i_mode))
1217 inc_nlink(new_dir); 1217 inc_nlink(new_dir);
@@ -1226,7 +1226,7 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
1226 jfs_err("jfs_rename did not expect dtDelete to return rc = %d", 1226 jfs_err("jfs_rename did not expect dtDelete to return rc = %d",
1227 rc); 1227 rc);
1228 txAbort(tid, 1); /* Marks Filesystem dirty */ 1228 txAbort(tid, 1); /* Marks Filesystem dirty */
1229 goto out4; 1229 goto out_tx;
1230 } 1230 }
1231 if (S_ISDIR(old_ip->i_mode)) { 1231 if (S_ISDIR(old_ip->i_mode)) {
1232 drop_nlink(old_dir); 1232 drop_nlink(old_dir);
@@ -1285,7 +1285,7 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
1285 1285
1286 rc = txCommit(tid, ipcount, iplist, commit_flag); 1286 rc = txCommit(tid, ipcount, iplist, commit_flag);
1287 1287
1288 out4: 1288 out_tx:
1289 txEnd(tid); 1289 txEnd(tid);
1290 if (new_ip) 1290 if (new_ip)
1291 mutex_unlock(&JFS_IP(new_ip)->commit_mutex); 1291 mutex_unlock(&JFS_IP(new_ip)->commit_mutex);
@@ -1308,13 +1308,6 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
1308 } 1308 }
1309 if (new_ip && (new_ip->i_nlink == 0)) 1309 if (new_ip && (new_ip->i_nlink == 0))
1310 set_cflag(COMMIT_Nolink, new_ip); 1310 set_cflag(COMMIT_Nolink, new_ip);
1311 out3:
1312 free_UCSname(&new_dname);
1313 out2:
1314 free_UCSname(&old_dname);
1315 out1:
1316 if (new_ip && !S_ISDIR(new_ip->i_mode))
1317 IWRITE_UNLOCK(new_ip);
1318 /* 1311 /*
1319 * Truncating the directory index table is not guaranteed. It 1312 * Truncating the directory index table is not guaranteed. It
1320 * may need to be done iteratively 1313 * may need to be done iteratively
@@ -1325,7 +1318,13 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
1325 1318
1326 clear_cflag(COMMIT_Stale, old_dir); 1319 clear_cflag(COMMIT_Stale, old_dir);
1327 } 1320 }
1328 1321 if (new_ip && !S_ISDIR(new_ip->i_mode))
1322 IWRITE_UNLOCK(new_ip);
1323 out3:
1324 free_UCSname(&new_dname);
1325 out2:
1326 free_UCSname(&old_dname);
1327 out1:
1329 jfs_info("jfs_rename: returning %d", rc); 1328 jfs_info("jfs_rename: returning %d", rc);
1330 return rc; 1329 return rc;
1331} 1330}
diff --git a/fs/locks.c b/fs/locks.c
index 653faabb07f4..d3d558ba4da7 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -862,12 +862,11 @@ static int posix_locks_deadlock(struct file_lock *caller_fl,
862 * whether or not a lock was successfully freed by testing the return 862 * whether or not a lock was successfully freed by testing the return
863 * value for -ENOENT. 863 * value for -ENOENT.
864 */ 864 */
865static int flock_lock_file(struct file *filp, struct file_lock *request) 865static int flock_lock_inode(struct inode *inode, struct file_lock *request)
866{ 866{
867 struct file_lock *new_fl = NULL; 867 struct file_lock *new_fl = NULL;
868 struct file_lock *fl; 868 struct file_lock *fl;
869 struct file_lock_context *ctx; 869 struct file_lock_context *ctx;
870 struct inode *inode = file_inode(filp);
871 int error = 0; 870 int error = 0;
872 bool found = false; 871 bool found = false;
873 LIST_HEAD(dispose); 872 LIST_HEAD(dispose);
@@ -890,7 +889,7 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
890 goto find_conflict; 889 goto find_conflict;
891 890
892 list_for_each_entry(fl, &ctx->flc_flock, fl_list) { 891 list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
893 if (filp != fl->fl_file) 892 if (request->fl_file != fl->fl_file)
894 continue; 893 continue;
895 if (request->fl_type == fl->fl_type) 894 if (request->fl_type == fl->fl_type)
896 goto out; 895 goto out;
@@ -1164,20 +1163,19 @@ int posix_lock_file(struct file *filp, struct file_lock *fl,
1164EXPORT_SYMBOL(posix_lock_file); 1163EXPORT_SYMBOL(posix_lock_file);
1165 1164
1166/** 1165/**
1167 * posix_lock_file_wait - Apply a POSIX-style lock to a file 1166 * posix_lock_inode_wait - Apply a POSIX-style lock to a file
1168 * @filp: The file to apply the lock to 1167 * @inode: inode of file to which lock request should be applied
1169 * @fl: The lock to be applied 1168 * @fl: The lock to be applied
1170 * 1169 *
1171 * Add a POSIX style lock to a file. 1170 * Variant of posix_lock_file_wait that does not take a filp, and so can be
1172 * We merge adjacent & overlapping locks whenever possible. 1171 * used after the filp has already been torn down.
1173 * POSIX locks are sorted by owner task, then by starting address
1174 */ 1172 */
1175int posix_lock_file_wait(struct file *filp, struct file_lock *fl) 1173int posix_lock_inode_wait(struct inode *inode, struct file_lock *fl)
1176{ 1174{
1177 int error; 1175 int error;
1178 might_sleep (); 1176 might_sleep ();
1179 for (;;) { 1177 for (;;) {
1180 error = posix_lock_file(filp, fl, NULL); 1178 error = __posix_lock_file(inode, fl, NULL);
1181 if (error != FILE_LOCK_DEFERRED) 1179 if (error != FILE_LOCK_DEFERRED)
1182 break; 1180 break;
1183 error = wait_event_interruptible(fl->fl_wait, !fl->fl_next); 1181 error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
@@ -1189,7 +1187,7 @@ int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
1189 } 1187 }
1190 return error; 1188 return error;
1191} 1189}
1192EXPORT_SYMBOL(posix_lock_file_wait); 1190EXPORT_SYMBOL(posix_lock_inode_wait);
1193 1191
1194/** 1192/**
1195 * locks_mandatory_locked - Check for an active lock 1193 * locks_mandatory_locked - Check for an active lock
@@ -1851,18 +1849,18 @@ int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
1851} 1849}
1852 1850
1853/** 1851/**
1854 * flock_lock_file_wait - Apply a FLOCK-style lock to a file 1852 * flock_lock_inode_wait - Apply a FLOCK-style lock to a file
1855 * @filp: The file to apply the lock to 1853 * @inode: inode of the file to apply to
1856 * @fl: The lock to be applied 1854 * @fl: The lock to be applied
1857 * 1855 *
1858 * Add a FLOCK style lock to a file. 1856 * Apply a FLOCK style lock request to an inode.
1859 */ 1857 */
1860int flock_lock_file_wait(struct file *filp, struct file_lock *fl) 1858int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl)
1861{ 1859{
1862 int error; 1860 int error;
1863 might_sleep(); 1861 might_sleep();
1864 for (;;) { 1862 for (;;) {
1865 error = flock_lock_file(filp, fl); 1863 error = flock_lock_inode(inode, fl);
1866 if (error != FILE_LOCK_DEFERRED) 1864 if (error != FILE_LOCK_DEFERRED)
1867 break; 1865 break;
1868 error = wait_event_interruptible(fl->fl_wait, !fl->fl_next); 1866 error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
@@ -1874,8 +1872,7 @@ int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
1874 } 1872 }
1875 return error; 1873 return error;
1876} 1874}
1877 1875EXPORT_SYMBOL(flock_lock_inode_wait);
1878EXPORT_SYMBOL(flock_lock_file_wait);
1879 1876
1880/** 1877/**
1881 * sys_flock: - flock() system call. 1878 * sys_flock: - flock() system call.
@@ -2401,7 +2398,8 @@ locks_remove_flock(struct file *filp)
2401 .fl_type = F_UNLCK, 2398 .fl_type = F_UNLCK,
2402 .fl_end = OFFSET_MAX, 2399 .fl_end = OFFSET_MAX,
2403 }; 2400 };
2404 struct file_lock_context *flctx = file_inode(filp)->i_flctx; 2401 struct inode *inode = file_inode(filp);
2402 struct file_lock_context *flctx = inode->i_flctx;
2405 2403
2406 if (list_empty(&flctx->flc_flock)) 2404 if (list_empty(&flctx->flc_flock))
2407 return; 2405 return;
@@ -2409,7 +2407,7 @@ locks_remove_flock(struct file *filp)
2409 if (filp->f_op->flock) 2407 if (filp->f_op->flock)
2410 filp->f_op->flock(filp, F_SETLKW, &fl); 2408 filp->f_op->flock(filp, F_SETLKW, &fl);
2411 else 2409 else
2412 flock_lock_file(filp, &fl); 2410 flock_lock_inode(inode, &fl);
2413 2411
2414 if (fl.fl_ops && fl.fl_ops->fl_release_private) 2412 if (fl.fl_ops && fl.fl_ops->fl_release_private)
2415 fl.fl_ops->fl_release_private(&fl); 2413 fl.fl_ops->fl_release_private(&fl);
diff --git a/fs/namei.c b/fs/namei.c
index ae4e4c18b2ac..1c2105ed20c5 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -879,7 +879,7 @@ static inline int may_follow_link(struct nameidata *nd)
879 return 0; 879 return 0;
880 880
881 /* Allowed if parent directory not sticky and world-writable. */ 881 /* Allowed if parent directory not sticky and world-writable. */
882 parent = nd->path.dentry->d_inode; 882 parent = nd->inode;
883 if ((parent->i_mode & (S_ISVTX|S_IWOTH)) != (S_ISVTX|S_IWOTH)) 883 if ((parent->i_mode & (S_ISVTX|S_IWOTH)) != (S_ISVTX|S_IWOTH))
884 return 0; 884 return 0;
885 885
@@ -1954,8 +1954,13 @@ OK:
1954 continue; 1954 continue;
1955 } 1955 }
1956 } 1956 }
1957 if (unlikely(!d_can_lookup(nd->path.dentry))) 1957 if (unlikely(!d_can_lookup(nd->path.dentry))) {
1958 if (nd->flags & LOOKUP_RCU) {
1959 if (unlazy_walk(nd, NULL, 0))
1960 return -ECHILD;
1961 }
1958 return -ENOTDIR; 1962 return -ENOTDIR;
1963 }
1959 } 1964 }
1960} 1965}
1961 1966
diff --git a/fs/namespace.c b/fs/namespace.c
index c7cb8a526c05..2b8aa15fd6df 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1361,6 +1361,36 @@ enum umount_tree_flags {
1361 UMOUNT_PROPAGATE = 2, 1361 UMOUNT_PROPAGATE = 2,
1362 UMOUNT_CONNECTED = 4, 1362 UMOUNT_CONNECTED = 4,
1363}; 1363};
1364
1365static bool disconnect_mount(struct mount *mnt, enum umount_tree_flags how)
1366{
1367 /* Leaving mounts connected is only valid for lazy umounts */
1368 if (how & UMOUNT_SYNC)
1369 return true;
1370
1371 /* A mount without a parent has nothing to be connected to */
1372 if (!mnt_has_parent(mnt))
1373 return true;
1374
1375 /* Because the reference counting rules change when mounts are
1376 * unmounted and connected, umounted mounts may not be
1377 * connected to mounted mounts.
1378 */
1379 if (!(mnt->mnt_parent->mnt.mnt_flags & MNT_UMOUNT))
1380 return true;
1381
1382 /* Has it been requested that the mount remain connected? */
1383 if (how & UMOUNT_CONNECTED)
1384 return false;
1385
1386 /* Is the mount locked such that it needs to remain connected? */
1387 if (IS_MNT_LOCKED(mnt))
1388 return false;
1389
1390 /* By default disconnect the mount */
1391 return true;
1392}
1393
1364/* 1394/*
1365 * mount_lock must be held 1395 * mount_lock must be held
1366 * namespace_sem must be held for write 1396 * namespace_sem must be held for write
@@ -1398,10 +1428,7 @@ static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
1398 if (how & UMOUNT_SYNC) 1428 if (how & UMOUNT_SYNC)
1399 p->mnt.mnt_flags |= MNT_SYNC_UMOUNT; 1429 p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;
1400 1430
1401 disconnect = !(((how & UMOUNT_CONNECTED) && 1431 disconnect = disconnect_mount(p, how);
1402 mnt_has_parent(p) &&
1403 (p->mnt_parent->mnt.mnt_flags & MNT_UMOUNT)) ||
1404 IS_MNT_LOCKED_AND_LAZY(p));
1405 1432
1406 pin_insert_group(&p->mnt_umount, &p->mnt_parent->mnt, 1433 pin_insert_group(&p->mnt_umount, &p->mnt_parent->mnt,
1407 disconnect ? &unmounted : NULL); 1434 disconnect ? &unmounted : NULL);
@@ -1538,11 +1565,8 @@ void __detach_mounts(struct dentry *dentry)
1538 while (!hlist_empty(&mp->m_list)) { 1565 while (!hlist_empty(&mp->m_list)) {
1539 mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list); 1566 mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
1540 if (mnt->mnt.mnt_flags & MNT_UMOUNT) { 1567 if (mnt->mnt.mnt_flags & MNT_UMOUNT) {
1541 struct mount *p, *tmp; 1568 hlist_add_head(&mnt->mnt_umount.s_list, &unmounted);
1542 list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) { 1569 umount_mnt(mnt);
1543 hlist_add_head(&p->mnt_umount.s_list, &unmounted);
1544 umount_mnt(p);
1545 }
1546 } 1570 }
1547 else umount_tree(mnt, UMOUNT_CONNECTED); 1571 else umount_tree(mnt, UMOUNT_CONNECTED);
1548 } 1572 }
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index ecebb406cc1a..4a90c9bb3135 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -775,7 +775,7 @@ static int nfs_init_server(struct nfs_server *server,
775 server->options = data->options; 775 server->options = data->options;
776 server->caps |= NFS_CAP_HARDLINKS|NFS_CAP_SYMLINKS|NFS_CAP_FILEID| 776 server->caps |= NFS_CAP_HARDLINKS|NFS_CAP_SYMLINKS|NFS_CAP_FILEID|
777 NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|NFS_CAP_OWNER_GROUP| 777 NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|NFS_CAP_OWNER_GROUP|
778 NFS_CAP_ATIME|NFS_CAP_CTIME|NFS_CAP_MTIME|NFS_CAP_CHANGE_ATTR; 778 NFS_CAP_ATIME|NFS_CAP_CTIME|NFS_CAP_MTIME;
779 779
780 if (data->rsize) 780 if (data->rsize)
781 server->rsize = nfs_block_size(data->rsize, NULL); 781 server->rsize = nfs_block_size(data->rsize, NULL);
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index c12951b9551e..b3289d701eea 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -1852,7 +1852,7 @@ ff_layout_mirror_prepare_stats(struct nfs42_layoutstat_args *args,
1852 struct nfs42_layoutstat_devinfo *devinfo; 1852 struct nfs42_layoutstat_devinfo *devinfo;
1853 int i; 1853 int i;
1854 1854
1855 for (i = 0; i <= FF_LAYOUT_MIRROR_COUNT(pls); i++) { 1855 for (i = 0; i < FF_LAYOUT_MIRROR_COUNT(pls); i++) {
1856 if (*dev_count >= dev_limit) 1856 if (*dev_count >= dev_limit)
1857 break; 1857 break;
1858 mirror = FF_LAYOUT_COMP(pls, i); 1858 mirror = FF_LAYOUT_COMP(pls, i);
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index b77b328a06d7..0adc7d245b3d 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -442,8 +442,9 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st
442 nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR); 442 nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
443 if (fattr->valid & NFS_ATTR_FATTR_CHANGE) 443 if (fattr->valid & NFS_ATTR_FATTR_CHANGE)
444 inode->i_version = fattr->change_attr; 444 inode->i_version = fattr->change_attr;
445 else if (nfs_server_capable(inode, NFS_CAP_CHANGE_ATTR)) 445 else
446 nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR); 446 nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR
447 | NFS_INO_REVAL_PAGECACHE);
447 if (fattr->valid & NFS_ATTR_FATTR_SIZE) 448 if (fattr->valid & NFS_ATTR_FATTR_SIZE)
448 inode->i_size = nfs_size_to_loff_t(fattr->size); 449 inode->i_size = nfs_size_to_loff_t(fattr->size);
449 else 450 else
@@ -1244,9 +1245,11 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
1244 if (fattr->valid & NFS_ATTR_FATTR_SIZE) { 1245 if (fattr->valid & NFS_ATTR_FATTR_SIZE) {
1245 cur_size = i_size_read(inode); 1246 cur_size = i_size_read(inode);
1246 new_isize = nfs_size_to_loff_t(fattr->size); 1247 new_isize = nfs_size_to_loff_t(fattr->size);
1247 if (cur_size != new_isize && nfsi->nrequests == 0) 1248 if (cur_size != new_isize)
1248 invalid |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE; 1249 invalid |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE;
1249 } 1250 }
1251 if (nfsi->nrequests != 0)
1252 invalid &= ~NFS_INO_REVAL_PAGECACHE;
1250 1253
1251 /* Have any file permissions changed? */ 1254 /* Have any file permissions changed? */
1252 if ((fattr->valid & NFS_ATTR_FATTR_MODE) && (inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO)) 1255 if ((fattr->valid & NFS_ATTR_FATTR_MODE) && (inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO))
@@ -1684,13 +1687,12 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
1684 invalid |= NFS_INO_INVALID_ATTR 1687 invalid |= NFS_INO_INVALID_ATTR
1685 | NFS_INO_INVALID_DATA 1688 | NFS_INO_INVALID_DATA
1686 | NFS_INO_INVALID_ACCESS 1689 | NFS_INO_INVALID_ACCESS
1687 | NFS_INO_INVALID_ACL 1690 | NFS_INO_INVALID_ACL;
1688 | NFS_INO_REVAL_PAGECACHE;
1689 if (S_ISDIR(inode->i_mode)) 1691 if (S_ISDIR(inode->i_mode))
1690 nfs_force_lookup_revalidate(inode); 1692 nfs_force_lookup_revalidate(inode);
1691 inode->i_version = fattr->change_attr; 1693 inode->i_version = fattr->change_attr;
1692 } 1694 }
1693 } else if (server->caps & NFS_CAP_CHANGE_ATTR) 1695 } else
1694 nfsi->cache_validity |= save_cache_validity; 1696 nfsi->cache_validity |= save_cache_validity;
1695 1697
1696 if (fattr->valid & NFS_ATTR_FATTR_MTIME) { 1698 if (fattr->valid & NFS_ATTR_FATTR_MTIME) {
@@ -1717,7 +1719,6 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
1717 if ((nfsi->nrequests == 0) || new_isize > cur_isize) { 1719 if ((nfsi->nrequests == 0) || new_isize > cur_isize) {
1718 i_size_write(inode, new_isize); 1720 i_size_write(inode, new_isize);
1719 invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA; 1721 invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
1720 invalid &= ~NFS_INO_REVAL_PAGECACHE;
1721 } 1722 }
1722 dprintk("NFS: isize change on server for file %s/%ld " 1723 dprintk("NFS: isize change on server for file %s/%ld "
1723 "(%Ld to %Ld)\n", 1724 "(%Ld to %Ld)\n",
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 7e3c4604bea8..9b372b845f6a 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -296,6 +296,22 @@ extern struct rpc_procinfo nfs4_procedures[];
296 296
297#ifdef CONFIG_NFS_V4_SECURITY_LABEL 297#ifdef CONFIG_NFS_V4_SECURITY_LABEL
298extern struct nfs4_label *nfs4_label_alloc(struct nfs_server *server, gfp_t flags); 298extern struct nfs4_label *nfs4_label_alloc(struct nfs_server *server, gfp_t flags);
299static inline struct nfs4_label *
300nfs4_label_copy(struct nfs4_label *dst, struct nfs4_label *src)
301{
302 if (!dst || !src)
303 return NULL;
304
305 if (src->len > NFS4_MAXLABELLEN)
306 return NULL;
307
308 dst->lfs = src->lfs;
309 dst->pi = src->pi;
310 dst->len = src->len;
311 memcpy(dst->label, src->label, src->len);
312
313 return dst;
314}
299static inline void nfs4_label_free(struct nfs4_label *label) 315static inline void nfs4_label_free(struct nfs4_label *label)
300{ 316{
301 if (label) { 317 if (label) {
@@ -316,6 +332,11 @@ static inline void nfs4_label_free(void *label) {}
316static inline void nfs_zap_label_cache_locked(struct nfs_inode *nfsi) 332static inline void nfs_zap_label_cache_locked(struct nfs_inode *nfsi)
317{ 333{
318} 334}
335static inline struct nfs4_label *
336nfs4_label_copy(struct nfs4_label *dst, struct nfs4_label *src)
337{
338 return NULL;
339}
319#endif /* CONFIG_NFS_V4_SECURITY_LABEL */ 340#endif /* CONFIG_NFS_V4_SECURITY_LABEL */
320 341
321/* proc.c */ 342/* proc.c */
diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
index f486b80f927a..d731bbf974aa 100644
--- a/fs/nfs/nfs42proc.c
+++ b/fs/nfs/nfs42proc.c
@@ -135,7 +135,7 @@ int nfs42_proc_deallocate(struct file *filep, loff_t offset, loff_t len)
135 return err; 135 return err;
136} 136}
137 137
138loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence) 138static loff_t _nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
139{ 139{
140 struct inode *inode = file_inode(filep); 140 struct inode *inode = file_inode(filep);
141 struct nfs42_seek_args args = { 141 struct nfs42_seek_args args = {
@@ -171,6 +171,23 @@ loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
171 return vfs_setpos(filep, res.sr_offset, inode->i_sb->s_maxbytes); 171 return vfs_setpos(filep, res.sr_offset, inode->i_sb->s_maxbytes);
172} 172}
173 173
174loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
175{
176 struct nfs_server *server = NFS_SERVER(file_inode(filep));
177 struct nfs4_exception exception = { };
178 int err;
179
180 do {
181 err = _nfs42_proc_llseek(filep, offset, whence);
182 if (err == -ENOTSUPP)
183 return -EOPNOTSUPP;
184 err = nfs4_handle_exception(server, err, &exception);
185 } while (exception.retry);
186
187 return err;
188}
189
190
174static void 191static void
175nfs42_layoutstat_prepare(struct rpc_task *task, void *calldata) 192nfs42_layoutstat_prepare(struct rpc_task *task, void *calldata)
176{ 193{
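Note on the fs/nfs/nfs42proc.c hunk above: the llseek path is split into a one-shot _nfs42_proc_llseek() and a public wrapper that loops on nfs4_handle_exception(), converting the NFS-internal -ENOTSUPP into -EOPNOTSUPP before the generic handler can turn it into a retry. The sketch below shows the same retry-wrapper shape in plain userspace C; the error codes and helper names are stand-ins (userspace has no ENOTSUPP, so -ENOSYS plays the "not supported" role here).

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    static int attempts;

    /* Stand-in for the single RPC attempt: fail twice with a retryable
     * error, then succeed. */
    static long one_attempt(void)
    {
        return ++attempts < 3 ? -EAGAIN : 42;
    }

    struct exception { bool retry; };

    /* Stand-in for nfs4_handle_exception(): decide whether to loop again. */
    static long handle_exception(long err, struct exception *exc)
    {
        if (err == -EAGAIN) {
            exc->retry = true;
            return 0;
        }
        return err;
    }

    /* Public entry point: a retry loop around the one-shot call, translating
     * the "not supported" error before the generic handler runs. */
    static long do_op(void)
    {
        struct exception exc;
        long err;

        do {
            exc.retry = false;
            err = one_attempt();
            if (err == -ENOSYS)
                return -EOPNOTSUPP;
            err = handle_exception(err, &exc);
        } while (exc.retry);

        return err;
    }

    int main(void)
    {
        printf("result: %ld\n", do_op());
        return 0;
    }
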
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 6f228b5af819..3acb1eb72930 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -467,7 +467,10 @@ static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
467 467
468static void renew_lease(const struct nfs_server *server, unsigned long timestamp) 468static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
469{ 469{
470 do_renew_lease(server->nfs_client, timestamp); 470 struct nfs_client *clp = server->nfs_client;
471
472 if (!nfs4_has_session(clp))
473 do_renew_lease(clp, timestamp);
471} 474}
472 475
473struct nfs4_call_sync_data { 476struct nfs4_call_sync_data {
@@ -616,8 +619,7 @@ int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
616 clp = session->clp; 619 clp = session->clp;
617 do_renew_lease(clp, res->sr_timestamp); 620 do_renew_lease(clp, res->sr_timestamp);
618 /* Check sequence flags */ 621 /* Check sequence flags */
619 if (res->sr_status_flags != 0) 622 nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
620 nfs4_schedule_lease_recovery(clp);
621 nfs41_update_target_slotid(slot->table, slot, res); 623 nfs41_update_target_slotid(slot->table, slot, res);
622 break; 624 break;
623 case 1: 625 case 1:
@@ -910,6 +912,7 @@ struct nfs4_opendata {
910 struct nfs_open_confirmres c_res; 912 struct nfs_open_confirmres c_res;
911 struct nfs4_string owner_name; 913 struct nfs4_string owner_name;
912 struct nfs4_string group_name; 914 struct nfs4_string group_name;
915 struct nfs4_label *a_label;
913 struct nfs_fattr f_attr; 916 struct nfs_fattr f_attr;
914 struct nfs4_label *f_label; 917 struct nfs4_label *f_label;
915 struct dentry *dir; 918 struct dentry *dir;
@@ -1013,6 +1016,10 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
1013 if (IS_ERR(p->f_label)) 1016 if (IS_ERR(p->f_label))
1014 goto err_free_p; 1017 goto err_free_p;
1015 1018
1019 p->a_label = nfs4_label_alloc(server, gfp_mask);
1020 if (IS_ERR(p->a_label))
1021 goto err_free_f;
1022
1016 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid; 1023 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
1017 p->o_arg.seqid = alloc_seqid(&sp->so_seqid, gfp_mask); 1024 p->o_arg.seqid = alloc_seqid(&sp->so_seqid, gfp_mask);
1018 if (IS_ERR(p->o_arg.seqid)) 1025 if (IS_ERR(p->o_arg.seqid))
@@ -1041,7 +1048,7 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
1041 p->o_arg.server = server; 1048 p->o_arg.server = server;
1042 p->o_arg.bitmask = nfs4_bitmask(server, label); 1049 p->o_arg.bitmask = nfs4_bitmask(server, label);
1043 p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0]; 1050 p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0];
1044 p->o_arg.label = label; 1051 p->o_arg.label = nfs4_label_copy(p->a_label, label);
1045 p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim); 1052 p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim);
1046 switch (p->o_arg.claim) { 1053 switch (p->o_arg.claim) {
1047 case NFS4_OPEN_CLAIM_NULL: 1054 case NFS4_OPEN_CLAIM_NULL:
@@ -1074,6 +1081,8 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
1074 return p; 1081 return p;
1075 1082
1076err_free_label: 1083err_free_label:
1084 nfs4_label_free(p->a_label);
1085err_free_f:
1077 nfs4_label_free(p->f_label); 1086 nfs4_label_free(p->f_label);
1078err_free_p: 1087err_free_p:
1079 kfree(p); 1088 kfree(p);
@@ -1093,6 +1102,7 @@ static void nfs4_opendata_free(struct kref *kref)
1093 nfs4_put_open_state(p->state); 1102 nfs4_put_open_state(p->state);
1094 nfs4_put_state_owner(p->owner); 1103 nfs4_put_state_owner(p->owner);
1095 1104
1105 nfs4_label_free(p->a_label);
1096 nfs4_label_free(p->f_label); 1106 nfs4_label_free(p->f_label);
1097 1107
1098 dput(p->dir); 1108 dput(p->dir);
@@ -1198,12 +1208,15 @@ static bool nfs_need_update_open_stateid(struct nfs4_state *state,
1198 1208
1199static void nfs_resync_open_stateid_locked(struct nfs4_state *state) 1209static void nfs_resync_open_stateid_locked(struct nfs4_state *state)
1200{ 1210{
1211 if (!(state->n_wronly || state->n_rdonly || state->n_rdwr))
1212 return;
1201 if (state->n_wronly) 1213 if (state->n_wronly)
1202 set_bit(NFS_O_WRONLY_STATE, &state->flags); 1214 set_bit(NFS_O_WRONLY_STATE, &state->flags);
1203 if (state->n_rdonly) 1215 if (state->n_rdonly)
1204 set_bit(NFS_O_RDONLY_STATE, &state->flags); 1216 set_bit(NFS_O_RDONLY_STATE, &state->flags);
1205 if (state->n_rdwr) 1217 if (state->n_rdwr)
1206 set_bit(NFS_O_RDWR_STATE, &state->flags); 1218 set_bit(NFS_O_RDWR_STATE, &state->flags);
1219 set_bit(NFS_OPEN_STATE, &state->flags);
1207} 1220}
1208 1221
1209static void nfs_clear_open_stateid_locked(struct nfs4_state *state, 1222static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
@@ -5439,15 +5452,15 @@ static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *
5439 return err; 5452 return err;
5440} 5453}
5441 5454
5442static int do_vfs_lock(struct file *file, struct file_lock *fl) 5455static int do_vfs_lock(struct inode *inode, struct file_lock *fl)
5443{ 5456{
5444 int res = 0; 5457 int res = 0;
5445 switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) { 5458 switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
5446 case FL_POSIX: 5459 case FL_POSIX:
5447 res = posix_lock_file_wait(file, fl); 5460 res = posix_lock_inode_wait(inode, fl);
5448 break; 5461 break;
5449 case FL_FLOCK: 5462 case FL_FLOCK:
5450 res = flock_lock_file_wait(file, fl); 5463 res = flock_lock_inode_wait(inode, fl);
5451 break; 5464 break;
5452 default: 5465 default:
5453 BUG(); 5466 BUG();
@@ -5484,7 +5497,6 @@ static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
5484 atomic_inc(&lsp->ls_count); 5497 atomic_inc(&lsp->ls_count);
5485 /* Ensure we don't close file until we're done freeing locks! */ 5498 /* Ensure we don't close file until we're done freeing locks! */
5486 p->ctx = get_nfs_open_context(ctx); 5499 p->ctx = get_nfs_open_context(ctx);
5487 get_file(fl->fl_file);
5488 memcpy(&p->fl, fl, sizeof(p->fl)); 5500 memcpy(&p->fl, fl, sizeof(p->fl));
5489 p->server = NFS_SERVER(inode); 5501 p->server = NFS_SERVER(inode);
5490 return p; 5502 return p;
@@ -5496,7 +5508,6 @@ static void nfs4_locku_release_calldata(void *data)
5496 nfs_free_seqid(calldata->arg.seqid); 5508 nfs_free_seqid(calldata->arg.seqid);
5497 nfs4_put_lock_state(calldata->lsp); 5509 nfs4_put_lock_state(calldata->lsp);
5498 put_nfs_open_context(calldata->ctx); 5510 put_nfs_open_context(calldata->ctx);
5499 fput(calldata->fl.fl_file);
5500 kfree(calldata); 5511 kfree(calldata);
5501} 5512}
5502 5513
@@ -5509,7 +5520,7 @@ static void nfs4_locku_done(struct rpc_task *task, void *data)
5509 switch (task->tk_status) { 5520 switch (task->tk_status) {
5510 case 0: 5521 case 0:
5511 renew_lease(calldata->server, calldata->timestamp); 5522 renew_lease(calldata->server, calldata->timestamp);
5512 do_vfs_lock(calldata->fl.fl_file, &calldata->fl); 5523 do_vfs_lock(calldata->lsp->ls_state->inode, &calldata->fl);
5513 if (nfs4_update_lock_stateid(calldata->lsp, 5524 if (nfs4_update_lock_stateid(calldata->lsp,
5514 &calldata->res.stateid)) 5525 &calldata->res.stateid))
5515 break; 5526 break;
@@ -5617,7 +5628,7 @@ static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *
5617 mutex_lock(&sp->so_delegreturn_mutex); 5628 mutex_lock(&sp->so_delegreturn_mutex);
5618 /* Exclude nfs4_reclaim_open_stateid() - note nesting! */ 5629 /* Exclude nfs4_reclaim_open_stateid() - note nesting! */
5619 down_read(&nfsi->rwsem); 5630 down_read(&nfsi->rwsem);
5620 if (do_vfs_lock(request->fl_file, request) == -ENOENT) { 5631 if (do_vfs_lock(inode, request) == -ENOENT) {
5621 up_read(&nfsi->rwsem); 5632 up_read(&nfsi->rwsem);
5622 mutex_unlock(&sp->so_delegreturn_mutex); 5633 mutex_unlock(&sp->so_delegreturn_mutex);
5623 goto out; 5634 goto out;
@@ -5758,7 +5769,7 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata)
5758 data->timestamp); 5769 data->timestamp);
5759 if (data->arg.new_lock) { 5770 if (data->arg.new_lock) {
5760 data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS); 5771 data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS);
5761 if (do_vfs_lock(data->fl.fl_file, &data->fl) < 0) { 5772 if (do_vfs_lock(lsp->ls_state->inode, &data->fl) < 0) {
5762 rpc_restart_call_prepare(task); 5773 rpc_restart_call_prepare(task);
5763 break; 5774 break;
5764 } 5775 }
@@ -6000,7 +6011,7 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock
6000 if (status != 0) 6011 if (status != 0)
6001 goto out; 6012 goto out;
6002 request->fl_flags |= FL_ACCESS; 6013 request->fl_flags |= FL_ACCESS;
6003 status = do_vfs_lock(request->fl_file, request); 6014 status = do_vfs_lock(state->inode, request);
6004 if (status < 0) 6015 if (status < 0)
6005 goto out; 6016 goto out;
6006 down_read(&nfsi->rwsem); 6017 down_read(&nfsi->rwsem);
@@ -6008,7 +6019,7 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock
6008 /* Yes: cache locks! */ 6019 /* Yes: cache locks! */
6009 /* ...but avoid races with delegation recall... */ 6020 /* ...but avoid races with delegation recall... */
6010 request->fl_flags = fl_flags & ~FL_SLEEP; 6021 request->fl_flags = fl_flags & ~FL_SLEEP;
6011 status = do_vfs_lock(request->fl_file, request); 6022 status = do_vfs_lock(state->inode, request);
6012 up_read(&nfsi->rwsem); 6023 up_read(&nfsi->rwsem);
6013 goto out; 6024 goto out;
6014 } 6025 }
@@ -7573,13 +7584,8 @@ static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
7573 goto out; 7584 goto out;
7574 } 7585 }
7575 ret = rpc_wait_for_completion_task(task); 7586 ret = rpc_wait_for_completion_task(task);
7576 if (!ret) { 7587 if (!ret)
7577 struct nfs4_sequence_res *res = task->tk_msg.rpc_resp;
7578
7579 if (task->tk_status == 0)
7580 nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
7581 ret = task->tk_status; 7588 ret = task->tk_status;
7582 }
7583 rpc_put_task(task); 7589 rpc_put_task(task);
7584out: 7590out:
7585 dprintk("<-- %s status=%d\n", __func__, ret); 7591 dprintk("<-- %s status=%d\n", __func__, ret);
@@ -7967,16 +7973,17 @@ static void nfs4_layoutreturn_release(void *calldata)
7967{ 7973{
7968 struct nfs4_layoutreturn *lrp = calldata; 7974 struct nfs4_layoutreturn *lrp = calldata;
7969 struct pnfs_layout_hdr *lo = lrp->args.layout; 7975 struct pnfs_layout_hdr *lo = lrp->args.layout;
7976 LIST_HEAD(freeme);
7970 7977
7971 dprintk("--> %s\n", __func__); 7978 dprintk("--> %s\n", __func__);
7972 spin_lock(&lo->plh_inode->i_lock); 7979 spin_lock(&lo->plh_inode->i_lock);
7973 if (lrp->res.lrs_present) 7980 if (lrp->res.lrs_present)
7974 pnfs_set_layout_stateid(lo, &lrp->res.stateid, true); 7981 pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
7982 pnfs_mark_matching_lsegs_invalid(lo, &freeme, &lrp->args.range);
7975 pnfs_clear_layoutreturn_waitbit(lo); 7983 pnfs_clear_layoutreturn_waitbit(lo);
7976 clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, &lo->plh_flags);
7977 rpc_wake_up(&NFS_SERVER(lo->plh_inode)->roc_rpcwaitq);
7978 lo->plh_block_lgets--; 7984 lo->plh_block_lgets--;
7979 spin_unlock(&lo->plh_inode->i_lock); 7985 spin_unlock(&lo->plh_inode->i_lock);
7986 pnfs_free_lseg_list(&freeme);
7980 pnfs_put_layout_hdr(lrp->args.layout); 7987 pnfs_put_layout_hdr(lrp->args.layout);
7981 nfs_iput_and_deactive(lrp->inode); 7988 nfs_iput_and_deactive(lrp->inode);
7982 kfree(calldata); 7989 kfree(calldata);
@@ -8590,7 +8597,6 @@ static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
8590 .minor_version = 0, 8597 .minor_version = 0,
8591 .init_caps = NFS_CAP_READDIRPLUS 8598 .init_caps = NFS_CAP_READDIRPLUS
8592 | NFS_CAP_ATOMIC_OPEN 8599 | NFS_CAP_ATOMIC_OPEN
8593 | NFS_CAP_CHANGE_ATTR
8594 | NFS_CAP_POSIX_LOCK, 8600 | NFS_CAP_POSIX_LOCK,
8595 .init_client = nfs40_init_client, 8601 .init_client = nfs40_init_client,
8596 .shutdown_client = nfs40_shutdown_client, 8602 .shutdown_client = nfs40_shutdown_client,
@@ -8616,7 +8622,6 @@ static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
8616 .minor_version = 1, 8622 .minor_version = 1,
8617 .init_caps = NFS_CAP_READDIRPLUS 8623 .init_caps = NFS_CAP_READDIRPLUS
8618 | NFS_CAP_ATOMIC_OPEN 8624 | NFS_CAP_ATOMIC_OPEN
8619 | NFS_CAP_CHANGE_ATTR
8620 | NFS_CAP_POSIX_LOCK 8625 | NFS_CAP_POSIX_LOCK
8621 | NFS_CAP_STATEID_NFSV41 8626 | NFS_CAP_STATEID_NFSV41
8622 | NFS_CAP_ATOMIC_OPEN_V1, 8627 | NFS_CAP_ATOMIC_OPEN_V1,
@@ -8639,7 +8644,6 @@ static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = {
8639 .minor_version = 2, 8644 .minor_version = 2,
8640 .init_caps = NFS_CAP_READDIRPLUS 8645 .init_caps = NFS_CAP_READDIRPLUS
8641 | NFS_CAP_ATOMIC_OPEN 8646 | NFS_CAP_ATOMIC_OPEN
8642 | NFS_CAP_CHANGE_ATTR
8643 | NFS_CAP_POSIX_LOCK 8647 | NFS_CAP_POSIX_LOCK
8644 | NFS_CAP_STATEID_NFSV41 8648 | NFS_CAP_STATEID_NFSV41
8645 | NFS_CAP_ATOMIC_OPEN_V1 8649 | NFS_CAP_ATOMIC_OPEN_V1
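Note on one recurring pattern in the fs/nfs/nfs4proc.c hunks above: nfs4_opendata_alloc() now allocates its own a_label and stores nfs4_label_copy(p->a_label, label) instead of keeping the caller's pointer, with matching frees added to both the error unwind (err_free_label/err_free_f) and nfs4_opendata_free(). A compact userspace sketch of that staged-allocation-with-goto-unwind pattern follows; the field names and sizes are made up for illustration.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct opendata {
        char *f_label;
        char *a_label;     /* private copy of the caller's label */
        char *seqid;
    };

    /* Each later failure frees everything allocated before it, in reverse
     * order, exactly once. */
    static struct opendata *opendata_alloc(const char *label)
    {
        struct opendata *p = calloc(1, sizeof(*p));

        if (!p)
            return NULL;

        p->f_label = calloc(1, 32);
        if (!p->f_label)
            goto err_free_p;

        p->a_label = strdup(label ? label : "");
        if (!p->a_label)
            goto err_free_f;

        p->seqid = calloc(1, 16);
        if (!p->seqid)
            goto err_free_label;

        return p;

    err_free_label:
        free(p->a_label);
    err_free_f:
        free(p->f_label);
    err_free_p:
        free(p);
        return NULL;
    }

    static void opendata_free(struct opendata *p)
    {
        if (!p)
            return;
        free(p->seqid);
        free(p->a_label);
        free(p->f_label);
        free(p);
    }

    int main(void)
    {
        struct opendata *p = opendata_alloc("system_u:object_r:etc_t");

        printf("alloc %s\n", p ? "ok" : "failed");
        opendata_free(p);
        return 0;
    }
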
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 605840dc89cf..f2e2ad894461 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -2191,25 +2191,35 @@ static void nfs41_handle_server_reboot(struct nfs_client *clp)
2191 } 2191 }
2192} 2192}
2193 2193
2194static void nfs41_handle_state_revoked(struct nfs_client *clp) 2194static void nfs41_handle_all_state_revoked(struct nfs_client *clp)
2195{ 2195{
2196 nfs4_reset_all_state(clp); 2196 nfs4_reset_all_state(clp);
2197 dprintk("%s: state revoked on server %s\n", __func__, clp->cl_hostname); 2197 dprintk("%s: state revoked on server %s\n", __func__, clp->cl_hostname);
2198} 2198}
2199 2199
2200static void nfs41_handle_some_state_revoked(struct nfs_client *clp)
2201{
2202 nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_nograce);
2203 nfs4_schedule_state_manager(clp);
2204
2205 dprintk("%s: state revoked on server %s\n", __func__, clp->cl_hostname);
2206}
2207
2200static void nfs41_handle_recallable_state_revoked(struct nfs_client *clp) 2208static void nfs41_handle_recallable_state_revoked(struct nfs_client *clp)
2201{ 2209{
2202 /* This will need to handle layouts too */ 2210 /* FIXME: For now, we destroy all layouts. */
2203 nfs_expire_all_delegations(clp); 2211 pnfs_destroy_all_layouts(clp);
2212 /* FIXME: For now, we test all delegations+open state+locks. */
2213 nfs41_handle_some_state_revoked(clp);
2204 dprintk("%s: Recallable state revoked on server %s!\n", __func__, 2214 dprintk("%s: Recallable state revoked on server %s!\n", __func__,
2205 clp->cl_hostname); 2215 clp->cl_hostname);
2206} 2216}
2207 2217
2208static void nfs41_handle_backchannel_fault(struct nfs_client *clp) 2218static void nfs41_handle_backchannel_fault(struct nfs_client *clp)
2209{ 2219{
2210 nfs_expire_all_delegations(clp); 2220 set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
2211 if (test_and_set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) == 0) 2221 nfs4_schedule_state_manager(clp);
2212 nfs4_schedule_state_manager(clp); 2222
2213 dprintk("%s: server %s declared a backchannel fault\n", __func__, 2223 dprintk("%s: server %s declared a backchannel fault\n", __func__,
2214 clp->cl_hostname); 2224 clp->cl_hostname);
2215} 2225}
@@ -2231,10 +2241,11 @@ void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags)
2231 2241
2232 if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED) 2242 if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED)
2233 nfs41_handle_server_reboot(clp); 2243 nfs41_handle_server_reboot(clp);
2234 if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED | 2244 if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED))
2235 SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED | 2245 nfs41_handle_all_state_revoked(clp);
2246 if (flags & (SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED |
2236 SEQ4_STATUS_ADMIN_STATE_REVOKED)) 2247 SEQ4_STATUS_ADMIN_STATE_REVOKED))
2237 nfs41_handle_state_revoked(clp); 2248 nfs41_handle_some_state_revoked(clp);
2238 if (flags & SEQ4_STATUS_LEASE_MOVED) 2249 if (flags & SEQ4_STATUS_LEASE_MOVED)
2239 nfs4_schedule_lease_moved_recovery(clp); 2250 nfs4_schedule_lease_moved_recovery(clp);
2240 if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED) 2251 if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED)
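Note on the fs/nfs/nfs4state.c hunk above: it stops treating every revocation flag as a full nfs4_reset_all_state(). Only SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED still does that; the "some"/"admin" cases mark existing state for no-grace reclaim and wake the state manager, and recallable-state revocation additionally throws away layouts. A trivial sketch of routing bit flags to the narrowest handler, with made-up flag names:

    #include <stdio.h>

    #define ST_ALL_REVOKED    0x1
    #define ST_SOME_REVOKED   0x2
    #define ST_ADMIN_REVOKED  0x4

    static void handle_status_flags(unsigned int flags)
    {
        if (flags & ST_ALL_REVOKED)
            puts("reset all state");
        if (flags & (ST_SOME_REVOKED | ST_ADMIN_REVOKED))
            puts("mark current state for reclaim, kick the state manager");
    }

    int main(void)
    {
        handle_status_flags(ST_SOME_REVOKED);
        return 0;
    }
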
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 1da68d3b1eda..4984bbe55ff1 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -1100,8 +1100,6 @@ static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
1100 mirror->pg_base = 0; 1100 mirror->pg_base = 0;
1101 mirror->pg_recoalesce = 0; 1101 mirror->pg_recoalesce = 0;
1102 1102
1103 desc->pg_moreio = 0;
1104
1105 while (!list_empty(&head)) { 1103 while (!list_empty(&head)) {
1106 struct nfs_page *req; 1104 struct nfs_page *req;
1107 1105
@@ -1109,8 +1107,11 @@ static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
1109 nfs_list_remove_request(req); 1107 nfs_list_remove_request(req);
1110 if (__nfs_pageio_add_request(desc, req)) 1108 if (__nfs_pageio_add_request(desc, req))
1111 continue; 1109 continue;
1112 if (desc->pg_error < 0) 1110 if (desc->pg_error < 0) {
1111 list_splice_tail(&head, &mirror->pg_list);
1112 mirror->pg_recoalesce = 1;
1113 return 0; 1113 return 0;
1114 }
1114 break; 1115 break;
1115 } 1116 }
1116 } while (mirror->pg_recoalesce); 1117 } while (mirror->pg_recoalesce);
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 0ba9a02c9566..70bf706b1090 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -352,7 +352,7 @@ pnfs_layout_need_return(struct pnfs_layout_hdr *lo,
352{ 352{
353 struct pnfs_layout_segment *s; 353 struct pnfs_layout_segment *s;
354 354
355 if (!test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags)) 355 if (!test_and_clear_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
356 return false; 356 return false;
357 357
358 list_for_each_entry(s, &lo->plh_segs, pls_list) 358 list_for_each_entry(s, &lo->plh_segs, pls_list)
@@ -362,6 +362,18 @@ pnfs_layout_need_return(struct pnfs_layout_hdr *lo,
362 return true; 362 return true;
363} 363}
364 364
365static bool
366pnfs_prepare_layoutreturn(struct pnfs_layout_hdr *lo)
367{
368 if (test_and_set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
369 return false;
370 lo->plh_return_iomode = 0;
371 lo->plh_block_lgets++;
372 pnfs_get_layout_hdr(lo);
373 clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, &lo->plh_flags);
374 return true;
375}
376
365static void pnfs_layoutreturn_before_put_lseg(struct pnfs_layout_segment *lseg, 377static void pnfs_layoutreturn_before_put_lseg(struct pnfs_layout_segment *lseg,
366 struct pnfs_layout_hdr *lo, struct inode *inode) 378 struct pnfs_layout_hdr *lo, struct inode *inode)
367{ 379{
@@ -372,17 +384,16 @@ static void pnfs_layoutreturn_before_put_lseg(struct pnfs_layout_segment *lseg,
372 if (pnfs_layout_need_return(lo, lseg)) { 384 if (pnfs_layout_need_return(lo, lseg)) {
373 nfs4_stateid stateid; 385 nfs4_stateid stateid;
374 enum pnfs_iomode iomode; 386 enum pnfs_iomode iomode;
387 bool send;
375 388
376 stateid = lo->plh_stateid; 389 stateid = lo->plh_stateid;
377 iomode = lo->plh_return_iomode; 390 iomode = lo->plh_return_iomode;
378 /* decreased in pnfs_send_layoutreturn() */ 391 send = pnfs_prepare_layoutreturn(lo);
379 lo->plh_block_lgets++;
380 lo->plh_return_iomode = 0;
381 spin_unlock(&inode->i_lock); 392 spin_unlock(&inode->i_lock);
382 pnfs_get_layout_hdr(lo); 393 if (send) {
383 394 /* Send an async layoutreturn so we dont deadlock */
384 /* Send an async layoutreturn so we dont deadlock */ 395 pnfs_send_layoutreturn(lo, stateid, iomode, false);
385 pnfs_send_layoutreturn(lo, stateid, iomode, false); 396 }
386 } else 397 } else
387 spin_unlock(&inode->i_lock); 398 spin_unlock(&inode->i_lock);
388} 399}
@@ -411,6 +422,10 @@ pnfs_put_lseg(struct pnfs_layout_segment *lseg)
411 pnfs_layoutreturn_before_put_lseg(lseg, lo, inode); 422 pnfs_layoutreturn_before_put_lseg(lseg, lo, inode);
412 423
413 if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) { 424 if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
425 if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
426 spin_unlock(&inode->i_lock);
427 return;
428 }
414 pnfs_get_layout_hdr(lo); 429 pnfs_get_layout_hdr(lo);
415 pnfs_layout_remove_lseg(lo, lseg); 430 pnfs_layout_remove_lseg(lo, lseg);
416 spin_unlock(&inode->i_lock); 431 spin_unlock(&inode->i_lock);
@@ -451,6 +466,8 @@ pnfs_put_lseg_locked(struct pnfs_layout_segment *lseg)
451 test_bit(NFS_LSEG_VALID, &lseg->pls_flags)); 466 test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
452 if (atomic_dec_and_test(&lseg->pls_refcount)) { 467 if (atomic_dec_and_test(&lseg->pls_refcount)) {
453 struct pnfs_layout_hdr *lo = lseg->pls_layout; 468 struct pnfs_layout_hdr *lo = lseg->pls_layout;
469 if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags))
470 return;
454 pnfs_get_layout_hdr(lo); 471 pnfs_get_layout_hdr(lo);
455 pnfs_layout_remove_lseg(lo, lseg); 472 pnfs_layout_remove_lseg(lo, lseg);
456 pnfs_free_lseg_async(lseg); 473 pnfs_free_lseg_async(lseg);
@@ -924,6 +941,7 @@ void pnfs_clear_layoutreturn_waitbit(struct pnfs_layout_hdr *lo)
924 clear_bit_unlock(NFS_LAYOUT_RETURN, &lo->plh_flags); 941 clear_bit_unlock(NFS_LAYOUT_RETURN, &lo->plh_flags);
925 smp_mb__after_atomic(); 942 smp_mb__after_atomic();
926 wake_up_bit(&lo->plh_flags, NFS_LAYOUT_RETURN); 943 wake_up_bit(&lo->plh_flags, NFS_LAYOUT_RETURN);
944 rpc_wake_up(&NFS_SERVER(lo->plh_inode)->roc_rpcwaitq);
927} 945}
928 946
929static int 947static int
@@ -978,6 +996,7 @@ _pnfs_return_layout(struct inode *ino)
978 LIST_HEAD(tmp_list); 996 LIST_HEAD(tmp_list);
979 nfs4_stateid stateid; 997 nfs4_stateid stateid;
980 int status = 0, empty; 998 int status = 0, empty;
999 bool send;
981 1000
982 dprintk("NFS: %s for inode %lu\n", __func__, ino->i_ino); 1001 dprintk("NFS: %s for inode %lu\n", __func__, ino->i_ino);
983 1002
@@ -1007,17 +1026,18 @@ _pnfs_return_layout(struct inode *ino)
1007 /* Don't send a LAYOUTRETURN if list was initially empty */ 1026 /* Don't send a LAYOUTRETURN if list was initially empty */
1008 if (empty) { 1027 if (empty) {
1009 spin_unlock(&ino->i_lock); 1028 spin_unlock(&ino->i_lock);
1010 pnfs_put_layout_hdr(lo);
1011 dprintk("NFS: %s no layout segments to return\n", __func__); 1029 dprintk("NFS: %s no layout segments to return\n", __func__);
1012 goto out; 1030 goto out_put_layout_hdr;
1013 } 1031 }
1014 1032
1015 set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags); 1033 set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
1016 lo->plh_block_lgets++; 1034 send = pnfs_prepare_layoutreturn(lo);
1017 spin_unlock(&ino->i_lock); 1035 spin_unlock(&ino->i_lock);
1018 pnfs_free_lseg_list(&tmp_list); 1036 pnfs_free_lseg_list(&tmp_list);
1019 1037 if (send)
1020 status = pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, true); 1038 status = pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, true);
1039out_put_layout_hdr:
1040 pnfs_put_layout_hdr(lo);
1021out: 1041out:
1022 dprintk("<-- %s status: %d\n", __func__, status); 1042 dprintk("<-- %s status: %d\n", __func__, status);
1023 return status; 1043 return status;
@@ -1097,13 +1117,9 @@ bool pnfs_roc(struct inode *ino)
1097out_noroc: 1117out_noroc:
1098 if (lo) { 1118 if (lo) {
1099 stateid = lo->plh_stateid; 1119 stateid = lo->plh_stateid;
1100 layoutreturn = 1120 if (test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
1101 test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, 1121 &lo->plh_flags))
1102 &lo->plh_flags); 1122 layoutreturn = pnfs_prepare_layoutreturn(lo);
1103 if (layoutreturn) {
1104 lo->plh_block_lgets++;
1105 pnfs_get_layout_hdr(lo);
1106 }
1107 } 1123 }
1108 spin_unlock(&ino->i_lock); 1124 spin_unlock(&ino->i_lock);
1109 if (layoutreturn) { 1125 if (layoutreturn) {
@@ -1146,15 +1162,18 @@ bool pnfs_roc_drain(struct inode *ino, u32 *barrier, struct rpc_task *task)
1146 struct pnfs_layout_segment *lseg; 1162 struct pnfs_layout_segment *lseg;
1147 nfs4_stateid stateid; 1163 nfs4_stateid stateid;
1148 u32 current_seqid; 1164 u32 current_seqid;
1149 bool found = false, layoutreturn = false; 1165 bool layoutreturn = false;
1150 1166
1151 spin_lock(&ino->i_lock); 1167 spin_lock(&ino->i_lock);
1152 list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list) 1168 list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list) {
1153 if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) { 1169 if (!test_bit(NFS_LSEG_ROC, &lseg->pls_flags))
1154 rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL); 1170 continue;
1155 found = true; 1171 if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags))
1156 goto out; 1172 continue;
1157 } 1173 rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
1174 spin_unlock(&ino->i_lock);
1175 return true;
1176 }
1158 lo = nfsi->layout; 1177 lo = nfsi->layout;
1159 current_seqid = be32_to_cpu(lo->plh_stateid.seqid); 1178 current_seqid = be32_to_cpu(lo->plh_stateid.seqid);
1160 1179
@@ -1162,23 +1181,19 @@ bool pnfs_roc_drain(struct inode *ino, u32 *barrier, struct rpc_task *task)
1162 * a barrier, we choose the worst-case barrier. 1181 * a barrier, we choose the worst-case barrier.
1163 */ 1182 */
1164 *barrier = current_seqid + atomic_read(&lo->plh_outstanding); 1183 *barrier = current_seqid + atomic_read(&lo->plh_outstanding);
1165out: 1184 stateid = lo->plh_stateid;
1166 if (!found) { 1185 if (test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
1167 stateid = lo->plh_stateid; 1186 &lo->plh_flags))
1168 layoutreturn = 1187 layoutreturn = pnfs_prepare_layoutreturn(lo);
1169 test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, 1188 if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
1170 &lo->plh_flags); 1189 rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
1171 if (layoutreturn) { 1190
1172 lo->plh_block_lgets++;
1173 pnfs_get_layout_hdr(lo);
1174 }
1175 }
1176 spin_unlock(&ino->i_lock); 1191 spin_unlock(&ino->i_lock);
1177 if (layoutreturn) { 1192 if (layoutreturn) {
1178 rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
1179 pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, false); 1193 pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, false);
1194 return true;
1180 } 1195 }
1181 return found; 1196 return false;
1182} 1197}
1183 1198
1184/* 1199/*
@@ -1695,7 +1710,6 @@ void pnfs_error_mark_layout_for_return(struct inode *inode,
1695 spin_lock(&inode->i_lock); 1710 spin_lock(&inode->i_lock);
1696 /* set failure bit so that pnfs path will be retried later */ 1711 /* set failure bit so that pnfs path will be retried later */
1697 pnfs_layout_set_fail_bit(lo, iomode); 1712 pnfs_layout_set_fail_bit(lo, iomode);
1698 set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags);
1699 if (lo->plh_return_iomode == 0) 1713 if (lo->plh_return_iomode == 0)
1700 lo->plh_return_iomode = range.iomode; 1714 lo->plh_return_iomode = range.iomode;
1701 else if (lo->plh_return_iomode != range.iomode) 1715 else if (lo->plh_return_iomode != range.iomode)
@@ -2207,13 +2221,12 @@ pnfs_layoutcommit_inode(struct inode *inode, bool sync)
2207 if (ld->prepare_layoutcommit) { 2221 if (ld->prepare_layoutcommit) {
2208 status = ld->prepare_layoutcommit(&data->args); 2222 status = ld->prepare_layoutcommit(&data->args);
2209 if (status) { 2223 if (status) {
2224 put_rpccred(data->cred);
2210 spin_lock(&inode->i_lock); 2225 spin_lock(&inode->i_lock);
2211 set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags); 2226 set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags);
2212 if (end_pos > nfsi->layout->plh_lwb) 2227 if (end_pos > nfsi->layout->plh_lwb)
2213 nfsi->layout->plh_lwb = end_pos; 2228 nfsi->layout->plh_lwb = end_pos;
2214 spin_unlock(&inode->i_lock); 2229 goto out_unlock;
2215 put_rpccred(data->cred);
2216 goto clear_layoutcommitting;
2217 } 2230 }
2218 } 2231 }
2219 2232
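Note on the fs/nfs/pnfs.c hunks above: several call sites now funnel through the new pnfs_prepare_layoutreturn(), which uses test_and_set_bit(NFS_LAYOUT_RETURN, ...) so that whichever path wins the race is the only one that takes the layout references and actually sends the LAYOUTRETURN. A standalone sketch of that "first caller wins" guard using C11 atomics; the flag name is illustrative, not the kernel's.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_flag layoutreturn_in_flight = ATOMIC_FLAG_INIT;

    /* Only the caller that wins the test-and-set does the setup and sends the
     * request; everyone else backs off, so the work happens exactly once no
     * matter how many paths race here. */
    static bool prepare_layoutreturn(void)
    {
        if (atomic_flag_test_and_set(&layoutreturn_in_flight))
            return false;          /* someone else already owns it */
        /* take references, reset the return iomode, etc. would go here */
        return true;
    }

    int main(void)
    {
        printf("first caller sends:  %d\n", prepare_layoutreturn());
        printf("second caller sends: %d\n", prepare_layoutreturn());
        return 0;
    }
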
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 65869ca9c851..75a35a1afa79 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1379,24 +1379,27 @@ static void nfs_writeback_check_extend(struct nfs_pgio_header *hdr,
1379{ 1379{
1380 struct nfs_pgio_args *argp = &hdr->args; 1380 struct nfs_pgio_args *argp = &hdr->args;
1381 struct nfs_pgio_res *resp = &hdr->res; 1381 struct nfs_pgio_res *resp = &hdr->res;
1382 u64 size = argp->offset + resp->count;
1382 1383
1383 if (!(fattr->valid & NFS_ATTR_FATTR_SIZE)) 1384 if (!(fattr->valid & NFS_ATTR_FATTR_SIZE))
1385 fattr->size = size;
1386 if (nfs_size_to_loff_t(fattr->size) < i_size_read(hdr->inode)) {
1387 fattr->valid &= ~NFS_ATTR_FATTR_SIZE;
1384 return; 1388 return;
1385 if (argp->offset + resp->count != fattr->size) 1389 }
1386 return; 1390 if (size != fattr->size)
1387 if (nfs_size_to_loff_t(fattr->size) < i_size_read(hdr->inode))
1388 return; 1391 return;
1389 /* Set attribute barrier */ 1392 /* Set attribute barrier */
1390 nfs_fattr_set_barrier(fattr); 1393 nfs_fattr_set_barrier(fattr);
1394 /* ...and update size */
1395 fattr->valid |= NFS_ATTR_FATTR_SIZE;
1391} 1396}
1392 1397
1393void nfs_writeback_update_inode(struct nfs_pgio_header *hdr) 1398void nfs_writeback_update_inode(struct nfs_pgio_header *hdr)
1394{ 1399{
1395 struct nfs_fattr *fattr = hdr->res.fattr; 1400 struct nfs_fattr *fattr = &hdr->fattr;
1396 struct inode *inode = hdr->inode; 1401 struct inode *inode = hdr->inode;
1397 1402
1398 if (fattr == NULL)
1399 return;
1400 spin_lock(&inode->i_lock); 1403 spin_lock(&inode->i_lock);
1401 nfs_writeback_check_extend(hdr, fattr); 1404 nfs_writeback_check_extend(hdr, fattr);
1402 nfs_post_op_update_inode_force_wcc_locked(inode, fattr); 1405 nfs_post_op_update_inode_force_wcc_locked(inode, fattr);
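Note on the fs/nfs/write.c hunk above: nfs_writeback_check_extend() now computes the end of the just-written range up front, refuses to let a smaller server-reported size shrink the cached file size, and only extends the cached size when the reported size is exactly the end of this write. A simplified decision helper, not the kernel's exact control flow, is sketched below.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    enum size_action { SIZE_IGNORE, SIZE_KEEP, SIZE_ACCEPT };

    /* Decide what to do with a server-reported size after writing
     * [offset, offset + count): never let it shrink below the locally cached
     * size, and only force-accept it when it lands exactly on the end of this
     * write (i.e. it is our own extension of the file). */
    static enum size_action check_extend(uint64_t server_size, bool size_valid,
                                         uint64_t cached_size,
                                         uint64_t offset, uint32_t count)
    {
        uint64_t end = offset + count;

        if (!size_valid)
            server_size = end;          /* no size in the reply: assume ours */
        if (server_size < cached_size)
            return SIZE_IGNORE;         /* would shrink the file locally */
        if (server_size != end)
            return SIZE_KEEP;           /* someone else wrote past us */
        return SIZE_ACCEPT;             /* safe to extend the cached size */
    }

    int main(void)
    {
        printf("%d\n", check_extend(4096, true, 1024, 0, 4096)); /* SIZE_ACCEPT */
        return 0;
    }
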
diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c
index 6904213a4363..ebf90e487c75 100644
--- a/fs/nfsd/nfs4layouts.c
+++ b/fs/nfsd/nfs4layouts.c
@@ -212,6 +212,7 @@ nfsd4_alloc_layout_stateid(struct nfsd4_compound_state *cstate,
212 BUG_ON(!ls->ls_file); 212 BUG_ON(!ls->ls_file);
213 213
214 if (nfsd4_layout_setlease(ls)) { 214 if (nfsd4_layout_setlease(ls)) {
215 fput(ls->ls_file);
215 put_nfs4_file(fp); 216 put_nfs4_file(fp);
216 kmem_cache_free(nfs4_layout_stateid_cache, ls); 217 kmem_cache_free(nfs4_layout_stateid_cache, ls);
217 return NULL; 218 return NULL;
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 61dfb33f0559..95202719a1fd 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -4396,9 +4396,9 @@ laundromat_main(struct work_struct *laundry)
4396 queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ); 4396 queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
4397} 4397}
4398 4398
4399static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_ol_stateid *stp) 4399static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp)
4400{ 4400{
4401 if (!fh_match(&fhp->fh_handle, &stp->st_stid.sc_file->fi_fhandle)) 4401 if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle))
4402 return nfserr_bad_stateid; 4402 return nfserr_bad_stateid;
4403 return nfs_ok; 4403 return nfs_ok;
4404} 4404}
@@ -4601,9 +4601,6 @@ nfs4_check_olstateid(struct svc_fh *fhp, struct nfs4_ol_stateid *ols, int flags)
4601{ 4601{
4602 __be32 status; 4602 __be32 status;
4603 4603
4604 status = nfs4_check_fh(fhp, ols);
4605 if (status)
4606 return status;
4607 status = nfsd4_check_openowner_confirmed(ols); 4604 status = nfsd4_check_openowner_confirmed(ols);
4608 if (status) 4605 if (status)
4609 return status; 4606 return status;
@@ -4690,6 +4687,9 @@ nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
4690 status = nfserr_bad_stateid; 4687 status = nfserr_bad_stateid;
4691 break; 4688 break;
4692 } 4689 }
4690 if (status)
4691 goto out;
4692 status = nfs4_check_fh(fhp, s);
4693 4693
4694done: 4694done:
4695 if (!status && filpp) 4695 if (!status && filpp)
@@ -4798,7 +4798,7 @@ static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_
4798 status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate)); 4798 status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
4799 if (status) 4799 if (status)
4800 return status; 4800 return status;
4801 return nfs4_check_fh(current_fh, stp); 4801 return nfs4_check_fh(current_fh, &stp->st_stid);
4802} 4802}
4803 4803
4804/* 4804/*
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 54633858733a..75e0563c09d1 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -2143,6 +2143,7 @@ nfsd4_encode_aclname(struct xdr_stream *xdr, struct svc_rqst *rqstp,
2143#define WORD0_ABSENT_FS_ATTRS (FATTR4_WORD0_FS_LOCATIONS | FATTR4_WORD0_FSID | \ 2143#define WORD0_ABSENT_FS_ATTRS (FATTR4_WORD0_FS_LOCATIONS | FATTR4_WORD0_FSID | \
2144 FATTR4_WORD0_RDATTR_ERROR) 2144 FATTR4_WORD0_RDATTR_ERROR)
2145#define WORD1_ABSENT_FS_ATTRS FATTR4_WORD1_MOUNTED_ON_FILEID 2145#define WORD1_ABSENT_FS_ATTRS FATTR4_WORD1_MOUNTED_ON_FILEID
2146#define WORD2_ABSENT_FS_ATTRS 0
2146 2147
2147#ifdef CONFIG_NFSD_V4_SECURITY_LABEL 2148#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
2148static inline __be32 2149static inline __be32
@@ -2171,7 +2172,7 @@ nfsd4_encode_security_label(struct xdr_stream *xdr, struct svc_rqst *rqstp,
2171{ return 0; } 2172{ return 0; }
2172#endif 2173#endif
2173 2174
2174static __be32 fattr_handle_absent_fs(u32 *bmval0, u32 *bmval1, u32 *rdattr_err) 2175static __be32 fattr_handle_absent_fs(u32 *bmval0, u32 *bmval1, u32 *bmval2, u32 *rdattr_err)
2175{ 2176{
2176 /* As per referral draft: */ 2177 /* As per referral draft: */
2177 if (*bmval0 & ~WORD0_ABSENT_FS_ATTRS || 2178 if (*bmval0 & ~WORD0_ABSENT_FS_ATTRS ||
@@ -2184,6 +2185,7 @@ static __be32 fattr_handle_absent_fs(u32 *bmval0, u32 *bmval1, u32 *rdattr_err)
2184 } 2185 }
2185 *bmval0 &= WORD0_ABSENT_FS_ATTRS; 2186 *bmval0 &= WORD0_ABSENT_FS_ATTRS;
2186 *bmval1 &= WORD1_ABSENT_FS_ATTRS; 2187 *bmval1 &= WORD1_ABSENT_FS_ATTRS;
2188 *bmval2 &= WORD2_ABSENT_FS_ATTRS;
2187 return 0; 2189 return 0;
2188} 2190}
2189 2191
@@ -2246,8 +2248,7 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp,
2246 BUG_ON(bmval2 & ~nfsd_suppattrs2(minorversion)); 2248 BUG_ON(bmval2 & ~nfsd_suppattrs2(minorversion));
2247 2249
2248 if (exp->ex_fslocs.migrated) { 2250 if (exp->ex_fslocs.migrated) {
2249 BUG_ON(bmval[2]); 2251 status = fattr_handle_absent_fs(&bmval0, &bmval1, &bmval2, &rdattr_err);
2250 status = fattr_handle_absent_fs(&bmval0, &bmval1, &rdattr_err);
2251 if (status) 2252 if (status)
2252 goto out; 2253 goto out;
2253 } 2254 }
@@ -2286,8 +2287,8 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp,
2286 } 2287 }
2287 2288
2288#ifdef CONFIG_NFSD_V4_SECURITY_LABEL 2289#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
2289 if ((bmval[2] & FATTR4_WORD2_SECURITY_LABEL) || 2290 if ((bmval2 & FATTR4_WORD2_SECURITY_LABEL) ||
2290 bmval[0] & FATTR4_WORD0_SUPPORTED_ATTRS) { 2291 bmval0 & FATTR4_WORD0_SUPPORTED_ATTRS) {
2291 err = security_inode_getsecctx(d_inode(dentry), 2292 err = security_inode_getsecctx(d_inode(dentry),
2292 &context, &contextlen); 2293 &context, &contextlen);
2293 contextsupport = (err == 0); 2294 contextsupport = (err == 0);
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
index 92e48c70f0f0..39ddcaf0918f 100644
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -412,16 +412,36 @@ void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group,
412 unsigned int flags) 412 unsigned int flags)
413{ 413{
414 struct fsnotify_mark *lmark, *mark; 414 struct fsnotify_mark *lmark, *mark;
415 LIST_HEAD(to_free);
415 416
417 /*
418 * We have to be really careful here. Anytime we drop mark_mutex, e.g.
419 * fsnotify_clear_marks_by_inode() can come and free marks. Even in our
420 * to_free list so we have to use mark_mutex even when accessing that
421 * list. And freeing mark requires us to drop mark_mutex. So we can
422 * reliably free only the first mark in the list. That's why we first
423 * move marks to free to to_free list in one go and then free marks in
424 * to_free list one by one.
425 */
416 mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING); 426 mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
417 list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) { 427 list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) {
418 if (mark->flags & flags) { 428 if (mark->flags & flags)
419 fsnotify_get_mark(mark); 429 list_move(&mark->g_list, &to_free);
420 fsnotify_destroy_mark_locked(mark, group);
421 fsnotify_put_mark(mark);
422 }
423 } 430 }
424 mutex_unlock(&group->mark_mutex); 431 mutex_unlock(&group->mark_mutex);
432
433 while (1) {
434 mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
435 if (list_empty(&to_free)) {
436 mutex_unlock(&group->mark_mutex);
437 break;
438 }
439 mark = list_first_entry(&to_free, struct fsnotify_mark, g_list);
440 fsnotify_get_mark(mark);
441 fsnotify_destroy_mark_locked(mark, group);
442 mutex_unlock(&group->mark_mutex);
443 fsnotify_put_mark(mark);
444 }
425} 445}
426 446
427/* 447/*
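Note on the fs/notify/mark.c hunk above: it replaces destroy-while-iterating with a two-phase scheme. Under mark_mutex, matching marks are first moved onto a private to_free list in one pass, then destroyed one at a time, re-taking the lock around each list access because other paths can still free marks sitting on that list. Below is a plain pthread sketch of the same shape; in this standalone version nothing else can touch to_free, so the re-locking only mirrors the structure of the kernel code.

    /* build: cc -pthread sketch.c */
    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct mark {
        int flags;
        struct mark *next;
    };

    static pthread_mutex_t mark_mutex = PTHREAD_MUTEX_INITIALIZER;
    static struct mark *marks_list;              /* protected by mark_mutex */

    static void clear_marks_by_flags(int flags)
    {
        struct mark *to_free = NULL, **pp, *m;

        /* Phase 1: move every matching mark to a private list in one pass. */
        pthread_mutex_lock(&mark_mutex);
        for (pp = &marks_list; (m = *pp) != NULL; ) {
            if (m->flags & flags) {
                *pp = m->next;
                m->next = to_free;
                to_free = m;
            } else {
                pp = &m->next;
            }
        }
        pthread_mutex_unlock(&mark_mutex);

        /* Phase 2: free them one by one, dropping the lock for the (possibly
         * blocking) destroy step each time. */
        for (;;) {
            pthread_mutex_lock(&mark_mutex);
            m = to_free;
            if (!m) {
                pthread_mutex_unlock(&mark_mutex);
                break;
            }
            to_free = m->next;
            pthread_mutex_unlock(&mark_mutex);
            free(m);                              /* "destroy" the mark */
        }
    }

    int main(void)
    {
        for (int i = 0; i < 3; i++) {
            struct mark *m = calloc(1, sizeof(*m));
            if (!m)
                return 1;
            m->flags = i & 1;
            m->next = marks_list;
            marks_list = m;
        }
        clear_marks_by_flags(1);
        return 0;
    }
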
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 1a35c6139656..0f5fd9db8194 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -685,7 +685,7 @@ static int ocfs2_direct_IO_zero_extend(struct ocfs2_super *osb,
685 685
686 if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN)) { 686 if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN)) {
687 u64 s = i_size_read(inode); 687 u64 s = i_size_read(inode);
688 sector_t sector = (p_cpos << (osb->s_clustersize_bits - 9)) + 688 sector_t sector = ((u64)p_cpos << (osb->s_clustersize_bits - 9)) +
689 (do_div(s, osb->s_clustersize) >> 9); 689 (do_div(s, osb->s_clustersize) >> 9);
690 690
691 ret = blkdev_issue_zeroout(osb->sb->s_bdev, sector, 691 ret = blkdev_issue_zeroout(osb->sb->s_bdev, sector,
@@ -910,7 +910,7 @@ static ssize_t ocfs2_direct_IO_write(struct kiocb *iocb,
910 BUG_ON(!p_cpos || (ext_flags & OCFS2_EXT_UNWRITTEN)); 910 BUG_ON(!p_cpos || (ext_flags & OCFS2_EXT_UNWRITTEN));
911 911
912 ret = blkdev_issue_zeroout(osb->sb->s_bdev, 912 ret = blkdev_issue_zeroout(osb->sb->s_bdev,
913 p_cpos << (osb->s_clustersize_bits - 9), 913 (u64)p_cpos << (osb->s_clustersize_bits - 9),
914 zero_len_head >> 9, GFP_NOFS, false); 914 zero_len_head >> 9, GFP_NOFS, false);
915 if (ret < 0) 915 if (ret < 0)
916 mlog_errno(ret); 916 mlog_errno(ret);
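Note on the two fs/ocfs2/aops.c hunks above: they cast p_cpos to u64 before the left shift because p_cpos is a 32-bit cluster number, so shifting it by (clustersize_bits - 9) is done in 32 bits and silently truncates before the result ever reaches the 64-bit sector variable. A two-line demonstration of the difference:

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t p_cpos = 0x00200000;            /* a cluster number */
        unsigned int shift = 11;                 /* clustersize_bits - 9 */

        uint64_t truncated = p_cpos << shift;            /* 32-bit shift: 0   */
        uint64_t correct   = (uint64_t)p_cpos << shift;  /* widen, then shift */

        printf("truncated=%" PRIu64 " correct=%" PRIu64 "\n", truncated, correct);
        return 0;
    }
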
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 8b23aa2f52dd..23157e40dd74 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -4025,9 +4025,13 @@ static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb)
4025 osb->dc_work_sequence = osb->dc_wake_sequence; 4025 osb->dc_work_sequence = osb->dc_wake_sequence;
4026 4026
4027 processed = osb->blocked_lock_count; 4027 processed = osb->blocked_lock_count;
4028 while (processed) { 4028 /*
4029 BUG_ON(list_empty(&osb->blocked_lock_list)); 4029 * blocked lock processing in this loop might call iput which can
4030 4030 * remove items off osb->blocked_lock_list. Downconvert up to
4031 * 'processed' number of locks, but stop short if we had some
4032 * removed in ocfs2_mark_lockres_freeing when downconverting.
4033 */
4034 while (processed && !list_empty(&osb->blocked_lock_list)) {
4031 lockres = list_entry(osb->blocked_lock_list.next, 4035 lockres = list_entry(osb->blocked_lock_list.next,
4032 struct ocfs2_lock_res, l_blocked_list); 4036 struct ocfs2_lock_res, l_blocked_list);
4033 list_del_init(&lockres->l_blocked_list); 4037 list_del_init(&lockres->l_blocked_list);
diff --git a/fs/pnode.h b/fs/pnode.h
index 7114ce6e6b9e..0fcdbe7ca648 100644
--- a/fs/pnode.h
+++ b/fs/pnode.h
@@ -20,8 +20,6 @@
20#define SET_MNT_MARK(m) ((m)->mnt.mnt_flags |= MNT_MARKED) 20#define SET_MNT_MARK(m) ((m)->mnt.mnt_flags |= MNT_MARKED)
21#define CLEAR_MNT_MARK(m) ((m)->mnt.mnt_flags &= ~MNT_MARKED) 21#define CLEAR_MNT_MARK(m) ((m)->mnt.mnt_flags &= ~MNT_MARKED)
22#define IS_MNT_LOCKED(m) ((m)->mnt.mnt_flags & MNT_LOCKED) 22#define IS_MNT_LOCKED(m) ((m)->mnt.mnt_flags & MNT_LOCKED)
23#define IS_MNT_LOCKED_AND_LAZY(m) \
24 (((m)->mnt.mnt_flags & (MNT_LOCKED|MNT_SYNC_UMOUNT)) == MNT_LOCKED)
25 23
26#define CL_EXPIRE 0x01 24#define CL_EXPIRE 0x01
27#define CL_SLAVE 0x02 25#define CL_SLAVE 0x02
diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
index d751fcb637bb..1ade1206bb89 100644
--- a/fs/proc/Kconfig
+++ b/fs/proc/Kconfig
@@ -75,3 +75,9 @@ config PROC_PAGE_MONITOR
75config PROC_CHILDREN 75config PROC_CHILDREN
76 bool "Include /proc/<pid>/task/<tid>/children file" 76 bool "Include /proc/<pid>/task/<tid>/children file"
77 default n 77 default n
78 help
79 Provides a fast way to retrieve first level children pids of a task. See
80 <file:Documentation/filesystems/proc.txt> for more information.
81
82 Say Y if you are running any user-space software which takes benefit from
83 this interface. For example, rkt is such a piece of software.
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 87782e874b6a..aa50d1ac28fc 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -243,6 +243,11 @@ static ssize_t proc_pid_cmdline_read(struct file *file, char __user *buf,
243 len1 = arg_end - arg_start; 243 len1 = arg_end - arg_start;
244 len2 = env_end - env_start; 244 len2 = env_end - env_start;
245 245
246 /* Empty ARGV. */
247 if (len1 == 0) {
248 rv = 0;
249 goto out_free_page;
250 }
246 /* 251 /*
247 * Inherently racy -- command line shares address space 252 * Inherently racy -- command line shares address space
248 * with code and data. 253 * with code and data.
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index 91a4e6426321..92e6726f6e37 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -92,7 +92,7 @@ static size_t get_kcore_size(int *nphdr, size_t *elf_buflen)
92 roundup(sizeof(CORE_STR), 4)) + 92 roundup(sizeof(CORE_STR), 4)) +
93 roundup(sizeof(struct elf_prstatus), 4) + 93 roundup(sizeof(struct elf_prstatus), 4) +
94 roundup(sizeof(struct elf_prpsinfo), 4) + 94 roundup(sizeof(struct elf_prpsinfo), 4) +
95 roundup(sizeof(struct task_struct), 4); 95 roundup(arch_task_struct_size, 4);
96 *elf_buflen = PAGE_ALIGN(*elf_buflen); 96 *elf_buflen = PAGE_ALIGN(*elf_buflen);
97 return size + *elf_buflen; 97 return size + *elf_buflen;
98} 98}
@@ -415,7 +415,7 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
415 /* set up the task structure */ 415 /* set up the task structure */
416 notes[2].name = CORE_STR; 416 notes[2].name = CORE_STR;
417 notes[2].type = NT_TASKSTRUCT; 417 notes[2].type = NT_TASKSTRUCT;
418 notes[2].datasz = sizeof(struct task_struct); 418 notes[2].datasz = arch_task_struct_size;
419 notes[2].data = current; 419 notes[2].data = current;
420 420
421 nhdr->p_filesz += notesize(&notes[2]); 421 nhdr->p_filesz += notesize(&notes[2]);
diff --git a/fs/signalfd.c b/fs/signalfd.c
index 7e412ad74836..270221fcef42 100644
--- a/fs/signalfd.c
+++ b/fs/signalfd.c
@@ -121,8 +121,9 @@ static int signalfd_copyinfo(struct signalfd_siginfo __user *uinfo,
121 * Other callers might not initialize the si_lsb field, 121 * Other callers might not initialize the si_lsb field,
122 * so check explicitly for the right codes here. 122 * so check explicitly for the right codes here.
123 */ 123 */
124 if (kinfo->si_code == BUS_MCEERR_AR || 124 if (kinfo->si_signo == SIGBUS &&
125 kinfo->si_code == BUS_MCEERR_AO) 125 (kinfo->si_code == BUS_MCEERR_AR ||
126 kinfo->si_code == BUS_MCEERR_AO))
126 err |= __put_user((short) kinfo->si_addr_lsb, 127 err |= __put_user((short) kinfo->si_addr_lsb,
127 &uinfo->ssi_addr_lsb); 128 &uinfo->ssi_addr_lsb);
128#endif 129#endif
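Note on the fs/signalfd.c hunk above: si_addr_lsb is only copied out when the signal really is SIGBUS with a BUS_MCEERR_* code, because that field shares storage with unrelated members of the siginfo union. A small userspace analogue of the same guard is sketched below; field and constant availability can vary by libc, hence _GNU_SOURCE and the fallback defines.

    #define _GNU_SOURCE
    #include <signal.h>
    #include <stdio.h>

    #ifndef BUS_MCEERR_AR
    #define BUS_MCEERR_AR 4
    #define BUS_MCEERR_AO 5
    #endif

    /* si_addr_lsb is only meaningful for SIGBUS memory-failure reports, so
     * check the signal number as well as the code before reading it. */
    static void report(const siginfo_t *si)
    {
        if (si->si_signo == SIGBUS &&
            (si->si_code == BUS_MCEERR_AR || si->si_code == BUS_MCEERR_AO))
            printf("memory failure, address LSB %d\n", (int)si->si_addr_lsb);
        else
            printf("signo %d, code %d\n", si->si_signo, si->si_code);
    }

    int main(void)
    {
        siginfo_t si = { 0 };

        si.si_signo = SIGSEGV;
        si.si_code = SEGV_MAPERR;
        report(&si);
        return 0;
    }
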
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 6afac3d561ac..8d0b3ade0ff0 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -1652,17 +1652,9 @@ static int udf_update_inode(struct inode *inode, int do_sync)
1652 iinfo->i_ext.i_data, inode->i_sb->s_blocksize - 1652 iinfo->i_ext.i_data, inode->i_sb->s_blocksize -
1653 sizeof(struct unallocSpaceEntry)); 1653 sizeof(struct unallocSpaceEntry));
1654 use->descTag.tagIdent = cpu_to_le16(TAG_IDENT_USE); 1654 use->descTag.tagIdent = cpu_to_le16(TAG_IDENT_USE);
1655 use->descTag.tagLocation = 1655 crclen = sizeof(struct unallocSpaceEntry);
1656 cpu_to_le32(iinfo->i_location.logicalBlockNum);
1657 crclen = sizeof(struct unallocSpaceEntry) +
1658 iinfo->i_lenAlloc - sizeof(struct tag);
1659 use->descTag.descCRCLength = cpu_to_le16(crclen);
1660 use->descTag.descCRC = cpu_to_le16(crc_itu_t(0, (char *)use +
1661 sizeof(struct tag),
1662 crclen));
1663 use->descTag.tagChecksum = udf_tag_checksum(&use->descTag);
1664 1656
1665 goto out; 1657 goto finish;
1666 } 1658 }
1667 1659
1668 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_FORGET)) 1660 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_FORGET))
@@ -1782,6 +1774,8 @@ static int udf_update_inode(struct inode *inode, int do_sync)
1782 efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE); 1774 efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE);
1783 crclen = sizeof(struct extendedFileEntry); 1775 crclen = sizeof(struct extendedFileEntry);
1784 } 1776 }
1777
1778finish:
1785 if (iinfo->i_strat4096) { 1779 if (iinfo->i_strat4096) {
1786 fe->icbTag.strategyType = cpu_to_le16(4096); 1780 fe->icbTag.strategyType = cpu_to_le16(4096);
1787 fe->icbTag.strategyParameter = cpu_to_le16(1); 1781 fe->icbTag.strategyParameter = cpu_to_le16(1);
@@ -1791,7 +1785,9 @@ static int udf_update_inode(struct inode *inode, int do_sync)
1791 fe->icbTag.numEntries = cpu_to_le16(1); 1785 fe->icbTag.numEntries = cpu_to_le16(1);
1792 } 1786 }
1793 1787
1794 if (S_ISDIR(inode->i_mode)) 1788 if (iinfo->i_use)
1789 fe->icbTag.fileType = ICBTAG_FILE_TYPE_USE;
1790 else if (S_ISDIR(inode->i_mode))
1795 fe->icbTag.fileType = ICBTAG_FILE_TYPE_DIRECTORY; 1791 fe->icbTag.fileType = ICBTAG_FILE_TYPE_DIRECTORY;
1796 else if (S_ISREG(inode->i_mode)) 1792 else if (S_ISREG(inode->i_mode))
1797 fe->icbTag.fileType = ICBTAG_FILE_TYPE_REGULAR; 1793 fe->icbTag.fileType = ICBTAG_FILE_TYPE_REGULAR;
@@ -1828,7 +1824,6 @@ static int udf_update_inode(struct inode *inode, int do_sync)
1828 crclen)); 1824 crclen));
1829 fe->descTag.tagChecksum = udf_tag_checksum(&fe->descTag); 1825 fe->descTag.tagChecksum = udf_tag_checksum(&fe->descTag);
1830 1826
1831out:
1832 set_buffer_uptodate(bh); 1827 set_buffer_uptodate(bh);
1833 unlock_buffer(bh); 1828 unlock_buffer(bh);
1834 1829
diff --git a/fs/xfs/libxfs/xfs_attr_remote.c b/fs/xfs/libxfs/xfs_attr_remote.c
index 20de88d1bf86..dd714037c322 100644
--- a/fs/xfs/libxfs/xfs_attr_remote.c
+++ b/fs/xfs/libxfs/xfs_attr_remote.c
@@ -159,11 +159,10 @@ xfs_attr3_rmt_write_verify(
159 struct xfs_buf *bp) 159 struct xfs_buf *bp)
160{ 160{
161 struct xfs_mount *mp = bp->b_target->bt_mount; 161 struct xfs_mount *mp = bp->b_target->bt_mount;
162 struct xfs_buf_log_item *bip = bp->b_fspriv; 162 int blksize = mp->m_attr_geo->blksize;
163 char *ptr; 163 char *ptr;
164 int len; 164 int len;
165 xfs_daddr_t bno; 165 xfs_daddr_t bno;
166 int blksize = mp->m_attr_geo->blksize;
167 166
168 /* no verification of non-crc buffers */ 167 /* no verification of non-crc buffers */
169 if (!xfs_sb_version_hascrc(&mp->m_sb)) 168 if (!xfs_sb_version_hascrc(&mp->m_sb))
@@ -175,16 +174,22 @@ xfs_attr3_rmt_write_verify(
175 ASSERT(len >= blksize); 174 ASSERT(len >= blksize);
176 175
177 while (len > 0) { 176 while (len > 0) {
177 struct xfs_attr3_rmt_hdr *rmt = (struct xfs_attr3_rmt_hdr *)ptr;
178
178 if (!xfs_attr3_rmt_verify(mp, ptr, blksize, bno)) { 179 if (!xfs_attr3_rmt_verify(mp, ptr, blksize, bno)) {
179 xfs_buf_ioerror(bp, -EFSCORRUPTED); 180 xfs_buf_ioerror(bp, -EFSCORRUPTED);
180 xfs_verifier_error(bp); 181 xfs_verifier_error(bp);
181 return; 182 return;
182 } 183 }
183 if (bip) {
184 struct xfs_attr3_rmt_hdr *rmt;
185 184
186 rmt = (struct xfs_attr3_rmt_hdr *)ptr; 185 /*
187 rmt->rm_lsn = cpu_to_be64(bip->bli_item.li_lsn); 186 * Ensure we aren't writing bogus LSNs to disk. See
187 * xfs_attr3_rmt_hdr_set() for the explanation.
188 */
189 if (rmt->rm_lsn != cpu_to_be64(NULLCOMMITLSN)) {
190 xfs_buf_ioerror(bp, -EFSCORRUPTED);
191 xfs_verifier_error(bp);
192 return;
188 } 193 }
189 xfs_update_cksum(ptr, blksize, XFS_ATTR3_RMT_CRC_OFF); 194 xfs_update_cksum(ptr, blksize, XFS_ATTR3_RMT_CRC_OFF);
190 195
@@ -221,6 +226,18 @@ xfs_attr3_rmt_hdr_set(
221 rmt->rm_owner = cpu_to_be64(ino); 226 rmt->rm_owner = cpu_to_be64(ino);
222 rmt->rm_blkno = cpu_to_be64(bno); 227 rmt->rm_blkno = cpu_to_be64(bno);
223 228
229 /*
230 * Remote attribute blocks are written synchronously, so we don't
231 * have an LSN that we can stamp in them that makes any sense to log
232 * recovery. To ensure that log recovery handles overwrites of these
233 * blocks sanely (i.e. once they've been freed and reallocated as some
234 * other type of metadata) we need to ensure that the LSN has a value
235 * that tells log recovery to ignore the LSN and overwrite the buffer
236 * with whatever is in it's log. To do this, we use the magic
237 * NULLCOMMITLSN to indicate that the LSN is invalid.
238 */
239 rmt->rm_lsn = cpu_to_be64(NULLCOMMITLSN);
240
224 return sizeof(struct xfs_attr3_rmt_hdr); 241 return sizeof(struct xfs_attr3_rmt_hdr);
225} 242}
226 243
@@ -434,14 +451,21 @@ xfs_attr_rmtval_set(
434 451
435 /* 452 /*
436 * Allocate a single extent, up to the size of the value. 453 * Allocate a single extent, up to the size of the value.
454 *
455 * Note that we have to consider this a data allocation as we
456 * write the remote attribute without logging the contents.
457 * Hence we must ensure that we aren't using blocks that are on
458 * the busy list so that we don't overwrite blocks which have
459 * recently been freed but their transactions are not yet
460 * committed to disk. If we overwrite the contents of a busy
461 * extent and then crash then the block may not contain the
462 * correct metadata after log recovery occurs.
437 */ 463 */
438 xfs_bmap_init(args->flist, args->firstblock); 464 xfs_bmap_init(args->flist, args->firstblock);
439 nmap = 1; 465 nmap = 1;
440 error = xfs_bmapi_write(args->trans, dp, (xfs_fileoff_t)lblkno, 466 error = xfs_bmapi_write(args->trans, dp, (xfs_fileoff_t)lblkno,
441 blkcnt, 467 blkcnt, XFS_BMAPI_ATTRFORK, args->firstblock,
442 XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA, 468 args->total, &map, &nmap, args->flist);
443 args->firstblock, args->total, &map, &nmap,
444 args->flist);
445 if (!error) { 469 if (!error) {
446 error = xfs_bmap_finish(&args->trans, args->flist, 470 error = xfs_bmap_finish(&args->trans, args->flist,
447 &committed); 471 &committed);
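Note on the fs/xfs/libxfs/xfs_attr_remote.c changes above: remote attribute blocks no longer get a real log LSN. xfs_attr3_rmt_hdr_set() stamps the NULLCOMMITLSN sentinel (so log recovery, per the xfs_log_recover.c hunk further down, always replays over such blocks), and the write verifier now rejects anything else. A stripped-down sketch of a sentinel-checking write verifier; the structure and constant here are stand-ins for the kernel's.

    #include <stdint.h>
    #include <stdio.h>

    #define NULLCOMMITLSN ((uint64_t)-1)   /* "no ordering information" */

    struct rmt_hdr {
        uint64_t rm_lsn;
        uint64_t rm_owner;
    };

    /* Blocks that are written outside the log carry the sentinel so recovery
     * never thinks they are newer than a logged copy; the write verifier
     * treats any other value as corruption. */
    static int rmt_write_verify(const struct rmt_hdr *hdr)
    {
        if (hdr->rm_lsn != NULLCOMMITLSN) {
            fprintf(stderr, "remote attr block has a real LSN: corrupt\n");
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        struct rmt_hdr good = { .rm_lsn = NULLCOMMITLSN };
        struct rmt_hdr bad  = { .rm_lsn = 42 };

        printf("good=%d bad=%d\n", rmt_write_verify(&good), rmt_write_verify(&bad));
        return 0;
    }
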
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index f0e8249722d4..db4acc1c3e73 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -1514,18 +1514,27 @@ xfs_filemap_fault(
1514 struct vm_area_struct *vma, 1514 struct vm_area_struct *vma,
1515 struct vm_fault *vmf) 1515 struct vm_fault *vmf)
1516{ 1516{
1517 struct xfs_inode *ip = XFS_I(file_inode(vma->vm_file)); 1517 struct inode *inode = file_inode(vma->vm_file);
1518 int ret; 1518 int ret;
1519 1519
1520 trace_xfs_filemap_fault(ip); 1520 trace_xfs_filemap_fault(XFS_I(inode));
1521 1521
1522 /* DAX can shortcut the normal fault path on write faults! */ 1522 /* DAX can shortcut the normal fault path on write faults! */
1523 if ((vmf->flags & FAULT_FLAG_WRITE) && IS_DAX(VFS_I(ip))) 1523 if ((vmf->flags & FAULT_FLAG_WRITE) && IS_DAX(inode))
1524 return xfs_filemap_page_mkwrite(vma, vmf); 1524 return xfs_filemap_page_mkwrite(vma, vmf);
1525 1525
1526 xfs_ilock(ip, XFS_MMAPLOCK_SHARED); 1526 xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
1527 ret = filemap_fault(vma, vmf); 1527 if (IS_DAX(inode)) {
1528 xfs_iunlock(ip, XFS_MMAPLOCK_SHARED); 1528 /*
1529 * we do not want to trigger unwritten extent conversion on read
1530 * faults - that is unnecessary overhead and would also require
1531 * changes to xfs_get_blocks_direct() to map unwritten extent
1532 * ioend for conversion on read-only mappings.
1533 */
1534 ret = __dax_fault(vma, vmf, xfs_get_blocks_direct, NULL);
1535 } else
1536 ret = filemap_fault(vma, vmf);
1537 xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
1529 1538
1530 return ret; 1539 return ret;
1531} 1540}
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 01dd228ca05e..480ebba8464f 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -1886,9 +1886,14 @@ xlog_recover_get_buf_lsn(
1886 uuid = &((struct xfs_dir3_blk_hdr *)blk)->uuid; 1886 uuid = &((struct xfs_dir3_blk_hdr *)blk)->uuid;
1887 break; 1887 break;
1888 case XFS_ATTR3_RMT_MAGIC: 1888 case XFS_ATTR3_RMT_MAGIC:
1889 lsn = be64_to_cpu(((struct xfs_attr3_rmt_hdr *)blk)->rm_lsn); 1889 /*
1890 uuid = &((struct xfs_attr3_rmt_hdr *)blk)->rm_uuid; 1890 * Remote attr blocks are written synchronously, rather than
1891 break; 1891 * being logged. That means they do not contain a valid LSN
1892 * (i.e. transactionally ordered) in them, and hence any time we
1893 * see a buffer to replay over the top of a remote attribute
1894 * block we should simply do so.
1895 */
1896 goto recover_immediately;
1892 case XFS_SB_MAGIC: 1897 case XFS_SB_MAGIC:
1893 lsn = be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn); 1898 lsn = be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn);
1894 uuid = &((struct xfs_dsb *)blk)->sb_uuid; 1899 uuid = &((struct xfs_dsb *)blk)->sb_uuid;
diff --git a/include/asm-generic/mm-arch-hooks.h b/include/asm-generic/mm-arch-hooks.h
new file mode 100644
index 000000000000..5ff0e5193f85
--- /dev/null
+++ b/include/asm-generic/mm-arch-hooks.h
@@ -0,0 +1,16 @@
1/*
2 * Architecture specific mm hooks
3 */
4
5#ifndef _ASM_GENERIC_MM_ARCH_HOOKS_H
6#define _ASM_GENERIC_MM_ARCH_HOOKS_H
7
8/*
9 * This file should be included through arch/../include/asm/Kbuild for
10 * the architecture which doesn't need specific mm hooks.
11 *
12 * In that case, the generic hooks defined in include/linux/mm-arch-hooks.h
13 * are used.
14 */
15
16#endif /* _ASM_GENERIC_MM_ARCH_HOOKS_H */
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 48db6a56975f..5aa519711e0b 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -691,7 +691,7 @@ struct drm_vblank_crtc {
691 struct timer_list disable_timer; /* delayed disable timer */ 691 struct timer_list disable_timer; /* delayed disable timer */
692 692
693 /* vblank counter, protected by dev->vblank_time_lock for writes */ 693 /* vblank counter, protected by dev->vblank_time_lock for writes */
694 unsigned long count; 694 u32 count;
695 /* vblank timestamps, protected by dev->vblank_time_lock for writes */ 695 /* vblank timestamps, protected by dev->vblank_time_lock for writes */
696 struct timeval time[DRM_VBLANKTIME_RBSIZE]; 696 struct timeval time[DRM_VBLANKTIME_RBSIZE];
697 697
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 57ca8cc383a6..3b4d8a4a23fb 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -743,8 +743,6 @@ struct drm_connector {
743 uint8_t num_h_tile, num_v_tile; 743 uint8_t num_h_tile, num_v_tile;
744 uint8_t tile_h_loc, tile_v_loc; 744 uint8_t tile_h_loc, tile_v_loc;
745 uint16_t tile_h_size, tile_v_size; 745 uint16_t tile_h_size, tile_v_size;
746
747 struct list_head destroy_list;
748}; 746};
749 747
750/** 748/**
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index c8fc187061de..918aa68b5199 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -168,6 +168,7 @@ struct drm_encoder_helper_funcs {
168 * @get_modes: get mode list for this connector 168 * @get_modes: get mode list for this connector
169 * @mode_valid: is this mode valid on the given connector? (optional) 169 * @mode_valid: is this mode valid on the given connector? (optional)
170 * @best_encoder: return the preferred encoder for this connector 170 * @best_encoder: return the preferred encoder for this connector
171 * @atomic_best_encoder: atomic version of @best_encoder
171 * 172 *
172 * The helper operations are called by the mid-layer CRTC helper. 173 * The helper operations are called by the mid-layer CRTC helper.
173 */ 174 */
@@ -176,6 +177,8 @@ struct drm_connector_helper_funcs {
176 enum drm_mode_status (*mode_valid)(struct drm_connector *connector, 177 enum drm_mode_status (*mode_valid)(struct drm_connector *connector,
177 struct drm_display_mode *mode); 178 struct drm_display_mode *mode);
178 struct drm_encoder *(*best_encoder)(struct drm_connector *connector); 179 struct drm_encoder *(*best_encoder)(struct drm_connector *connector);
180 struct drm_encoder *(*atomic_best_encoder)(struct drm_connector *connector,
181 struct drm_connector_state *connector_state);
179}; 182};
180 183
181extern void drm_helper_disable_unused_functions(struct drm_device *dev); 184extern void drm_helper_disable_unused_functions(struct drm_device *dev);
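Illustrative only, not part of the patch: a driver using the atomic helpers could wire up the new callback roughly as below; every my_* / to_my_connector() name is made up.

    /* Sketch of a connector helper table using atomic_best_encoder(). */
    static struct drm_encoder *
    my_atomic_best_encoder(struct drm_connector *connector,
                           struct drm_connector_state *conn_state)
    {
            /* choose based on the connector's atomic state, not its legacy state */
            return &to_my_connector(connector)->encoder;
    }

    static const struct drm_connector_helper_funcs my_connector_helpers = {
            .get_modes           = my_get_modes,
            .mode_valid          = my_mode_valid,
            .atomic_best_encoder = my_atomic_best_encoder,
    };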
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index 799050198323..53c53c459b15 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -348,6 +348,25 @@ static inline int drm_eld_mnl(const uint8_t *eld)
348} 348}
349 349
350/** 350/**
351 * drm_eld_sad - Get ELD SAD structures.
352 * @eld: pointer to an eld memory structure with sad_count set
353 */
354static inline const uint8_t *drm_eld_sad(const uint8_t *eld)
355{
356 unsigned int ver, mnl;
357
358 ver = (eld[DRM_ELD_VER] & DRM_ELD_VER_MASK) >> DRM_ELD_VER_SHIFT;
359 if (ver != 2 && ver != 31)
360 return NULL;
361
362 mnl = drm_eld_mnl(eld);
363 if (mnl > 16)
364 return NULL;
365
366 return eld + DRM_ELD_CEA_SAD(mnl, 0);
367}
368
369/**
351 * drm_eld_sad_count - Get ELD SAD count. 370 * drm_eld_sad_count - Get ELD SAD count.
352 * @eld: pointer to an eld memory structure with sad_count set 371 * @eld: pointer to an eld memory structure with sad_count set
353 */ 372 */
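Illustrative consumer of the new helper (not from the patch): CEA Short Audio Descriptors are three bytes each, so a caller can walk them with drm_eld_sad() together with the existing drm_eld_sad_count():

    const uint8_t *sad = drm_eld_sad(eld);
    int i, count = drm_eld_sad_count(eld);

    if (sad)
            for (i = 0; i < count; i++, sad += 3)   /* SADs are 3 bytes each */
                    pr_debug("SAD %d: 0x%02x 0x%02x 0x%02x\n",
                             i, sad[0], sad[1], sad[2]);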
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 45c39a37f924..8bc073d297db 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -172,6 +172,7 @@
172 {0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ 172 {0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
173 {0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ 173 {0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
174 {0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ 174 {0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
175 {0x1002, 0x6617, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
175 {0x1002, 0x6620, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 176 {0x1002, 0x6620, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
176 {0x1002, 0x6621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 177 {0x1002, 0x6621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
177 {0x1002, 0x6623, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 178 {0x1002, 0x6623, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
diff --git a/include/linux/amba/sp810.h b/include/linux/amba/sp810.h
index c7df89f99115..58fe9e8b6fd7 100644
--- a/include/linux/amba/sp810.h
+++ b/include/linux/amba/sp810.h
@@ -2,7 +2,7 @@
2 * ARM PrimeXsys System Controller SP810 header file 2 * ARM PrimeXsys System Controller SP810 header file
3 * 3 *
4 * Copyright (C) 2009 ST Microelectronics 4 * Copyright (C) 2009 ST Microelectronics
5 * Viresh Kumar <viresh.linux@gmail.com> 5 * Viresh Kumar <vireshk@kernel.org>
6 * 6 *
7 * This file is licensed under the terms of the GNU General Public 7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any 8 * License version 2. This program is licensed "as is" without any
diff --git a/include/linux/ata.h b/include/linux/ata.h
index fed36418dd1c..d2992bfa1706 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -45,6 +45,7 @@ enum {
45 ATA_SECT_SIZE = 512, 45 ATA_SECT_SIZE = 512,
46 ATA_MAX_SECTORS_128 = 128, 46 ATA_MAX_SECTORS_128 = 128,
47 ATA_MAX_SECTORS = 256, 47 ATA_MAX_SECTORS = 256,
48 ATA_MAX_SECTORS_1024 = 1024,
48 ATA_MAX_SECTORS_LBA48 = 65535,/* TODO: 65536? */ 49 ATA_MAX_SECTORS_LBA48 = 65535,/* TODO: 65536? */
49 ATA_MAX_SECTORS_TAPE = 65535, 50 ATA_MAX_SECTORS_TAPE = 65535,
50 51
@@ -384,8 +385,6 @@ enum {
384 SATA_SSP = 0x06, /* Software Settings Preservation */ 385 SATA_SSP = 0x06, /* Software Settings Preservation */
385 SATA_DEVSLP = 0x09, /* Device Sleep */ 386 SATA_DEVSLP = 0x09, /* Device Sleep */
386 387
387 SETFEATURE_SENSE_DATA = 0xC3, /* Sense Data Reporting feature */
388
389 /* feature values for SET_MAX */ 388 /* feature values for SET_MAX */
390 ATA_SET_MAX_ADDR = 0x00, 389 ATA_SET_MAX_ADDR = 0x00,
391 ATA_SET_MAX_PASSWD = 0x01, 390 ATA_SET_MAX_PASSWD = 0x01,
@@ -529,8 +528,6 @@ struct ata_bmdma_prd {
529#define ata_id_cdb_intr(id) (((id)[ATA_ID_CONFIG] & 0x60) == 0x20) 528#define ata_id_cdb_intr(id) (((id)[ATA_ID_CONFIG] & 0x60) == 0x20)
530#define ata_id_has_da(id) ((id)[ATA_ID_SATA_CAPABILITY_2] & (1 << 4)) 529#define ata_id_has_da(id) ((id)[ATA_ID_SATA_CAPABILITY_2] & (1 << 4))
531#define ata_id_has_devslp(id) ((id)[ATA_ID_FEATURE_SUPP] & (1 << 8)) 530#define ata_id_has_devslp(id) ((id)[ATA_ID_FEATURE_SUPP] & (1 << 8))
532#define ata_id_has_ncq_autosense(id) \
533 ((id)[ATA_ID_FEATURE_SUPP] & (1 << 7))
534 531
535static inline bool ata_id_has_hipm(const u16 *id) 532static inline bool ata_id_has_hipm(const u16 *id)
536{ 533{
@@ -719,20 +716,6 @@ static inline bool ata_id_has_read_log_dma_ext(const u16 *id)
719 return false; 716 return false;
720} 717}
721 718
722static inline bool ata_id_has_sense_reporting(const u16 *id)
723{
724 if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
725 return false;
726 return id[ATA_ID_COMMAND_SET_3] & (1 << 6);
727}
728
729static inline bool ata_id_sense_reporting_enabled(const u16 *id)
730{
731 if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
732 return false;
733 return id[ATA_ID_COMMAND_SET_4] & (1 << 6);
734}
735
736/** 719/**
737 * ata_id_major_version - get ATA level of drive 720 * ata_id_major_version - get ATA level of drive
738 * @id: Identify data 721 * @id: Identify data
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index 58cfab80dd70..1b62d768c7df 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -47,6 +47,7 @@ struct blkcg {
47 47
48 struct blkcg_policy_data *pd[BLKCG_MAX_POLS]; 48 struct blkcg_policy_data *pd[BLKCG_MAX_POLS];
49 49
50 struct list_head all_blkcgs_node;
50#ifdef CONFIG_CGROUP_WRITEBACK 51#ifdef CONFIG_CGROUP_WRITEBACK
51 struct list_head cgwb_list; 52 struct list_head cgwb_list;
52#endif 53#endif
@@ -88,18 +89,12 @@ struct blkg_policy_data {
88 * Policies that need to keep per-blkcg data which is independent 89 * Policies that need to keep per-blkcg data which is independent
89 * from any request_queue associated to it must specify its size 90 * from any request_queue associated to it must specify its size
90 * with the cpd_size field of the blkcg_policy structure and 91 * with the cpd_size field of the blkcg_policy structure and
91 * embed a blkcg_policy_data in it. blkcg core allocates 92 * embed a blkcg_policy_data in it. cpd_init() is invoked to let
92 * policy-specific per-blkcg structures lazily the first time 93 * each policy handle per-blkcg data.
93 * they are actually needed, so it handles them together with
94 * blkgs. cpd_init() is invoked to let each policy handle
95 * per-blkcg data.
96 */ 94 */
97struct blkcg_policy_data { 95struct blkcg_policy_data {
98 /* the policy id this per-policy data belongs to */ 96 /* the policy id this per-policy data belongs to */
99 int plid; 97 int plid;
100
101 /* used during policy activation */
102 struct list_head alloc_node;
103}; 98};
104 99
105/* association between a blk cgroup and a request queue */ 100/* association between a blk cgroup and a request queue */
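As a rough sketch of the convention the comment above describes (all my_* names are hypothetical, and the exact blkcg_policy field names are assumed rather than taken from this excerpt):

    struct my_blkcg_data {
            struct blkcg_policy_data cpd;   /* embedded generic part */
            unsigned int my_weight;         /* policy-specific per-blkcg state */
    };

    static struct blkcg_policy my_blkcg_policy = {
            .cpd_size    = sizeof(struct my_blkcg_data),  /* per the comment above */
            .cpd_init_fn = my_cpd_init,   /* the cpd_init() hook; field name assumed */
    };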
diff --git a/include/linux/can/skb.h b/include/linux/can/skb.h
index b6a52a4b457a..51bb6532785c 100644
--- a/include/linux/can/skb.h
+++ b/include/linux/can/skb.h
@@ -27,10 +27,12 @@
27/** 27/**
28 * struct can_skb_priv - private additional data inside CAN sk_buffs 28 * struct can_skb_priv - private additional data inside CAN sk_buffs
29 * @ifindex: ifindex of the first interface the CAN frame appeared on 29 * @ifindex: ifindex of the first interface the CAN frame appeared on
30 * @skbcnt: atomic counter to have an unique id together with skb pointer
30 * @cf: align to the following CAN frame at skb->data 31 * @cf: align to the following CAN frame at skb->data
31 */ 32 */
32struct can_skb_priv { 33struct can_skb_priv {
33 int ifindex; 34 int ifindex;
35 int skbcnt;
34 struct can_frame cf[0]; 36 struct can_frame cf[0];
35}; 37};
36 38
diff --git a/include/linux/clkdev.h b/include/linux/clkdev.h
index a240b18e86fa..08bffcc466de 100644
--- a/include/linux/clkdev.h
+++ b/include/linux/clkdev.h
@@ -33,18 +33,19 @@ struct clk_lookup {
33 } 33 }
34 34
35struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id, 35struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id,
36 const char *dev_fmt, ...); 36 const char *dev_fmt, ...) __printf(3, 4);
37 37
38void clkdev_add(struct clk_lookup *cl); 38void clkdev_add(struct clk_lookup *cl);
39void clkdev_drop(struct clk_lookup *cl); 39void clkdev_drop(struct clk_lookup *cl);
40 40
41struct clk_lookup *clkdev_create(struct clk *clk, const char *con_id, 41struct clk_lookup *clkdev_create(struct clk *clk, const char *con_id,
42 const char *dev_fmt, ...); 42 const char *dev_fmt, ...) __printf(3, 4);
43 43
44void clkdev_add_table(struct clk_lookup *, size_t); 44void clkdev_add_table(struct clk_lookup *, size_t);
45int clk_add_alias(const char *, const char *, const char *, struct device *); 45int clk_add_alias(const char *, const char *, const char *, struct device *);
46 46
47int clk_register_clkdev(struct clk *, const char *, const char *, ...); 47int clk_register_clkdev(struct clk *, const char *, const char *, ...)
48 __printf(3, 4);
48int clk_register_clkdevs(struct clk *, struct clk_lookup *, size_t); 49int clk_register_clkdevs(struct clk *, struct clk_lookup *, size_t);
49 50
50#ifdef CONFIG_COMMON_CLK 51#ifdef CONFIG_COMMON_CLK
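The __printf(n, m) markers added here (and to several of the headers below) are the kernel's shorthand, defined in its compiler headers, for GCC's __attribute__((format(printf, n, m))): n is the position of the format string, m the first variadic argument, with m = 0 used for va_list variants. A minimal illustration; log_msg() is a made-up function:

    extern __printf(2, 3) int log_msg(int level, const char *fmt, ...);

    void example(void)
    {
            log_msg(1, "value=%d\n", 42);   /* fine */
            /* log_msg(1, "value=%s\n", 42);   would now trigger a -Wformat warning */
    }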
diff --git a/include/linux/compat.h b/include/linux/compat.h
index ab25814690bc..a76c9172b2eb 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -424,7 +424,7 @@ asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv,
424 424
425asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp); 425asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp);
426 426
427extern int compat_printk(const char *fmt, ...); 427extern __printf(1, 2) int compat_printk(const char *fmt, ...);
428extern void sigset_from_compat(sigset_t *set, const compat_sigset_t *compat); 428extern void sigset_from_compat(sigset_t *set, const compat_sigset_t *compat);
429extern void sigset_to_compat(compat_sigset_t *compat, const sigset_t *set); 429extern void sigset_to_compat(compat_sigset_t *compat, const sigset_t *set);
430 430
diff --git a/include/linux/configfs.h b/include/linux/configfs.h
index c9e5c57e4edf..63a36e89d0eb 100644
--- a/include/linux/configfs.h
+++ b/include/linux/configfs.h
@@ -64,7 +64,8 @@ struct config_item {
64 struct dentry *ci_dentry; 64 struct dentry *ci_dentry;
65}; 65};
66 66
67extern int config_item_set_name(struct config_item *, const char *, ...); 67extern __printf(2, 3)
68int config_item_set_name(struct config_item *, const char *, ...);
68 69
69static inline char *config_item_name(struct config_item * item) 70static inline char *config_item_name(struct config_item * item)
70{ 71{
diff --git a/include/linux/cper.h b/include/linux/cper.h
index 76abba4b238e..dcacb1a72e26 100644
--- a/include/linux/cper.h
+++ b/include/linux/cper.h
@@ -340,7 +340,27 @@ struct cper_ia_proc_ctx {
340 __u64 mm_reg_addr; 340 __u64 mm_reg_addr;
341}; 341};
342 342
343/* Memory Error Section */ 343/* Old Memory Error Section UEFI 2.1, 2.2 */
344struct cper_sec_mem_err_old {
345 __u64 validation_bits;
346 __u64 error_status;
347 __u64 physical_addr;
348 __u64 physical_addr_mask;
349 __u16 node;
350 __u16 card;
351 __u16 module;
352 __u16 bank;
353 __u16 device;
354 __u16 row;
355 __u16 column;
356 __u16 bit_pos;
357 __u64 requestor_id;
358 __u64 responder_id;
359 __u64 target_id;
360 __u8 error_type;
361};
362
363/* Memory Error Section UEFI >= 2.3 */
344struct cper_sec_mem_err { 364struct cper_sec_mem_err {
345 __u64 validation_bits; 365 __u64 validation_bits;
346 __u64 error_status; 366 __u64 error_status;
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index c0fb6b1b4712..23c30bdcca86 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -40,9 +40,10 @@ extern void cpu_remove_dev_attr(struct device_attribute *attr);
40extern int cpu_add_dev_attr_group(struct attribute_group *attrs); 40extern int cpu_add_dev_attr_group(struct attribute_group *attrs);
41extern void cpu_remove_dev_attr_group(struct attribute_group *attrs); 41extern void cpu_remove_dev_attr_group(struct attribute_group *attrs);
42 42
43extern struct device *cpu_device_create(struct device *parent, void *drvdata, 43extern __printf(4, 5)
44 const struct attribute_group **groups, 44struct device *cpu_device_create(struct device *parent, void *drvdata,
45 const char *fmt, ...); 45 const struct attribute_group **groups,
46 const char *fmt, ...);
46#ifdef CONFIG_HOTPLUG_CPU 47#ifdef CONFIG_HOTPLUG_CPU
47extern void unregister_cpu(struct cpu *cpu); 48extern void unregister_cpu(struct cpu *cpu);
48extern ssize_t arch_cpu_probe(const char *, size_t); 49extern ssize_t arch_cpu_probe(const char *, size_t);
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 29ad97c34fd5..bde1e567b3a9 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -62,6 +62,7 @@ struct cpufreq_policy {
62 /* CPUs sharing clock, require sw coordination */ 62 /* CPUs sharing clock, require sw coordination */
63 cpumask_var_t cpus; /* Online CPUs only */ 63 cpumask_var_t cpus; /* Online CPUs only */
64 cpumask_var_t related_cpus; /* Online + Offline CPUs */ 64 cpumask_var_t related_cpus; /* Online + Offline CPUs */
65 cpumask_var_t real_cpus; /* Related and present */
65 66
66 unsigned int shared_type; /* ACPI: ANY or ALL affected CPUs 67 unsigned int shared_type; /* ACPI: ANY or ALL affected CPUs
67 should set cpufreq */ 68 should set cpufreq */
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index d2d50249b7b2..d67ae119cf4e 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -327,7 +327,8 @@ static inline unsigned d_count(const struct dentry *dentry)
327/* 327/*
328 * helper function for dentry_operations.d_dname() members 328 * helper function for dentry_operations.d_dname() members
329 */ 329 */
330extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...); 330extern __printf(4, 5)
331char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
331extern char *simple_dname(struct dentry *, char *, int); 332extern char *simple_dname(struct dentry *, char *, int);
332 333
333extern char *__d_path(const struct path *, const struct path *, char *, int); 334extern char *__d_path(const struct path *, const struct path *, char *, int);
diff --git a/include/linux/device.h b/include/linux/device.h
index 5a31bf3a4024..a2b4ea70a946 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -637,8 +637,9 @@ extern int devres_release_group(struct device *dev, void *id);
637 637
638/* managed devm_k.alloc/kfree for device drivers */ 638/* managed devm_k.alloc/kfree for device drivers */
639extern void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp); 639extern void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp);
640extern char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt, 640extern __printf(3, 0)
641 va_list ap); 641char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
642 va_list ap);
642extern __printf(3, 4) 643extern __printf(3, 4)
643char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...); 644char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...);
644static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp) 645static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
@@ -1011,12 +1012,10 @@ extern int __must_check device_reprobe(struct device *dev);
1011/* 1012/*
1012 * Easy functions for dynamically creating devices on the fly 1013 * Easy functions for dynamically creating devices on the fly
1013 */ 1014 */
1014extern struct device *device_create_vargs(struct class *cls, 1015extern __printf(5, 0)
1015 struct device *parent, 1016struct device *device_create_vargs(struct class *cls, struct device *parent,
1016 dev_t devt, 1017 dev_t devt, void *drvdata,
1017 void *drvdata, 1018 const char *fmt, va_list vargs);
1018 const char *fmt,
1019 va_list vargs);
1020extern __printf(5, 6) 1019extern __printf(5, 6)
1021struct device *device_create(struct class *cls, struct device *parent, 1020struct device *device_create(struct class *cls, struct device *parent,
1022 dev_t devt, void *drvdata, 1021 dev_t devt, void *drvdata,
diff --git a/include/linux/fs.h b/include/linux/fs.h
index a0653e560c26..84b783f277f7 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -55,7 +55,8 @@ struct vm_fault;
55 55
56extern void __init inode_init(void); 56extern void __init inode_init(void);
57extern void __init inode_init_early(void); 57extern void __init inode_init_early(void);
58extern void __init files_init(unsigned long); 58extern void __init files_init(void);
59extern void __init files_maxfiles_init(void);
59 60
60extern struct files_stat_struct files_stat; 61extern struct files_stat_struct files_stat;
61extern unsigned long get_max_files(void); 62extern unsigned long get_max_files(void);
@@ -1046,12 +1047,12 @@ extern void locks_remove_file(struct file *);
1046extern void locks_release_private(struct file_lock *); 1047extern void locks_release_private(struct file_lock *);
1047extern void posix_test_lock(struct file *, struct file_lock *); 1048extern void posix_test_lock(struct file *, struct file_lock *);
1048extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *); 1049extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *);
1049extern int posix_lock_file_wait(struct file *, struct file_lock *); 1050extern int posix_lock_inode_wait(struct inode *, struct file_lock *);
1050extern int posix_unblock_lock(struct file_lock *); 1051extern int posix_unblock_lock(struct file_lock *);
1051extern int vfs_test_lock(struct file *, struct file_lock *); 1052extern int vfs_test_lock(struct file *, struct file_lock *);
1052extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *); 1053extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *);
1053extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl); 1054extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl);
1054extern int flock_lock_file_wait(struct file *filp, struct file_lock *fl); 1055extern int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl);
1055extern int __break_lease(struct inode *inode, unsigned int flags, unsigned int type); 1056extern int __break_lease(struct inode *inode, unsigned int flags, unsigned int type);
1056extern void lease_get_mtime(struct inode *, struct timespec *time); 1057extern void lease_get_mtime(struct inode *, struct timespec *time);
1057extern int generic_setlease(struct file *, long, struct file_lock **, void **priv); 1058extern int generic_setlease(struct file *, long, struct file_lock **, void **priv);
@@ -1137,7 +1138,8 @@ static inline int posix_lock_file(struct file *filp, struct file_lock *fl,
1137 return -ENOLCK; 1138 return -ENOLCK;
1138} 1139}
1139 1140
1140static inline int posix_lock_file_wait(struct file *filp, struct file_lock *fl) 1141static inline int posix_lock_inode_wait(struct inode *inode,
1142 struct file_lock *fl)
1141{ 1143{
1142 return -ENOLCK; 1144 return -ENOLCK;
1143} 1145}
@@ -1163,8 +1165,8 @@ static inline int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
1163 return 0; 1165 return 0;
1164} 1166}
1165 1167
1166static inline int flock_lock_file_wait(struct file *filp, 1168static inline int flock_lock_inode_wait(struct inode *inode,
1167 struct file_lock *request) 1169 struct file_lock *request)
1168{ 1170{
1169 return -ENOLCK; 1171 return -ENOLCK;
1170} 1172}
@@ -1202,6 +1204,20 @@ static inline void show_fd_locks(struct seq_file *f,
1202 struct file *filp, struct files_struct *files) {} 1204 struct file *filp, struct files_struct *files) {}
1203#endif /* !CONFIG_FILE_LOCKING */ 1205#endif /* !CONFIG_FILE_LOCKING */
1204 1206
1207static inline struct inode *file_inode(const struct file *f)
1208{
1209 return f->f_inode;
1210}
1211
1212static inline int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
1213{
1214 return posix_lock_inode_wait(file_inode(filp), fl);
1215}
1216
1217static inline int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
1218{
1219 return flock_lock_inode_wait(file_inode(filp), fl);
1220}
1205 1221
1206struct fasync_struct { 1222struct fasync_struct {
1207 spinlock_t fa_lock; 1223 spinlock_t fa_lock;
@@ -2011,11 +2027,6 @@ extern void ihold(struct inode * inode);
2011extern void iput(struct inode *); 2027extern void iput(struct inode *);
2012extern int generic_update_time(struct inode *, struct timespec *, int); 2028extern int generic_update_time(struct inode *, struct timespec *, int);
2013 2029
2014static inline struct inode *file_inode(const struct file *f)
2015{
2016 return f->f_inode;
2017}
2018
2019/* /sys/fs */ 2030/* /sys/fs */
2020extern struct kobject *fs_kobj; 2031extern struct kobject *fs_kobj;
2021 2032
@@ -2235,7 +2246,7 @@ extern int ioctl_preallocate(struct file *filp, void __user *argp);
2235 2246
2236/* fs/dcache.c */ 2247/* fs/dcache.c */
2237extern void __init vfs_caches_init_early(void); 2248extern void __init vfs_caches_init_early(void);
2238extern void __init vfs_caches_init(unsigned long); 2249extern void __init vfs_caches_init(void);
2239 2250
2240extern struct kmem_cache *names_cachep; 2251extern struct kmem_cache *names_cachep;
2241 2252
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 1da602982cf9..6cd8c0ee4b6f 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -116,6 +116,7 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
116 * SAVE_REGS. If another ops with this flag set is already registered 116 * SAVE_REGS. If another ops with this flag set is already registered
117 * for any of the functions that this ops will be registered for, then 117 * for any of the functions that this ops will be registered for, then
118 * this ops will fail to register or set_filter_ip. 118 * this ops will fail to register or set_filter_ip.
119 * PID - Is affected by set_ftrace_pid (allows filtering on those pids)
119 */ 120 */
120enum { 121enum {
121 FTRACE_OPS_FL_ENABLED = 1 << 0, 122 FTRACE_OPS_FL_ENABLED = 1 << 0,
@@ -132,6 +133,7 @@ enum {
132 FTRACE_OPS_FL_MODIFYING = 1 << 11, 133 FTRACE_OPS_FL_MODIFYING = 1 << 11,
133 FTRACE_OPS_FL_ALLOC_TRAMP = 1 << 12, 134 FTRACE_OPS_FL_ALLOC_TRAMP = 1 << 12,
134 FTRACE_OPS_FL_IPMODIFY = 1 << 13, 135 FTRACE_OPS_FL_IPMODIFY = 1 << 13,
136 FTRACE_OPS_FL_PID = 1 << 14,
135}; 137};
136 138
137#ifdef CONFIG_DYNAMIC_FTRACE 139#ifdef CONFIG_DYNAMIC_FTRACE
@@ -159,6 +161,7 @@ struct ftrace_ops {
159 struct ftrace_ops *next; 161 struct ftrace_ops *next;
160 unsigned long flags; 162 unsigned long flags;
161 void *private; 163 void *private;
164 ftrace_func_t saved_func;
162 int __percpu *disabled; 165 int __percpu *disabled;
163#ifdef CONFIG_DYNAMIC_FTRACE 166#ifdef CONFIG_DYNAMIC_FTRACE
164 int nr_trampolines; 167 int nr_trampolines;
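Illustrative only: a callback-based tracer that wants its hits filtered through the set_ftrace_pid file would set the new flag when registering its ops; the my_* names are made up.

    static void my_callback(unsigned long ip, unsigned long parent_ip,
                            struct ftrace_ops *op, struct pt_regs *regs)
    {
            /* invoked on every traced function hit */
    }

    static struct ftrace_ops my_ops = {
            .func  = my_callback,
            .flags = FTRACE_OPS_FL_PID,     /* honour set_ftrace_pid filtering */
    };

    /* ... register_ftrace_function(&my_ops); */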
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index cc7ec129b329..c8393cd4d44f 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -45,7 +45,7 @@ struct seq_file;
45 * @base: identifies the first GPIO number handled by this chip; 45 * @base: identifies the first GPIO number handled by this chip;
46 * or, if negative during registration, requests dynamic ID allocation. 46 * or, if negative during registration, requests dynamic ID allocation.
47 * DEPRECATION: providing anything non-negative and nailing the base 47 * DEPRECATION: providing anything non-negative and nailing the base
48 * base offset of GPIO chips is deprecated. Please pass -1 as base to 48 * offset of GPIO chips is deprecated. Please pass -1 as base to
49 * let gpiolib select the chip base in all possible cases. We want to 49 * let gpiolib select the chip base in all possible cases. We want to
50 * get rid of the static GPIO number space in the long run. 50 * get rid of the static GPIO number space in the long run.
51 * @ngpio: the number of GPIOs handled by this controller; the last GPIO 51 * @ngpio: the number of GPIOs handled by this controller; the last GPIO
diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h
index 0042bf330b99..c02b5ce6c5cd 100644
--- a/include/linux/hid-sensor-hub.h
+++ b/include/linux/hid-sensor-hub.h
@@ -230,6 +230,7 @@ struct hid_sensor_common {
230 struct platform_device *pdev; 230 struct platform_device *pdev;
231 unsigned usage_id; 231 unsigned usage_id;
232 atomic_t data_ready; 232 atomic_t data_ready;
233 atomic_t user_requested_state;
233 struct iio_trigger *trigger; 234 struct iio_trigger *trigger;
234 struct hid_sensor_hub_attribute_info poll; 235 struct hid_sensor_hub_attribute_info poll;
235 struct hid_sensor_hub_attribute_info report_state; 236 struct hid_sensor_hub_attribute_info report_state;
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 205026175c42..d891f949466a 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -460,15 +460,14 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
460 return &mm->page_table_lock; 460 return &mm->page_table_lock;
461} 461}
462 462
463static inline bool hugepages_supported(void) 463#ifndef hugepages_supported
464{ 464/*
465 /* 465 * Some platform decide whether they support huge pages at boot
466 * Some platform decide whether they support huge pages at boot 466 * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
467 * time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when 467 * when there is no such support
468 * there is no such support 468 */
469 */ 469#define hugepages_supported() (HPAGE_SHIFT != 0)
470 return HPAGE_SHIFT != 0; 470#endif
471}
472 471
473#else /* CONFIG_HUGETLB_PAGE */ 472#else /* CONFIG_HUGETLB_PAGE */
474struct hstate {}; 473struct hstate {};
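With the inline turned into an overridable macro, an architecture that determines huge page support on its own terms can supply a definition before the generic one is seen. Purely a sketch; my_arch_has_hugepage_hw() is made up:

    /* e.g. in arch/<arch>/include/asm/hugetlb.h */
    #define hugepages_supported() (my_arch_has_hugepage_hw())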
diff --git a/include/linux/init.h b/include/linux/init.h
index 7c68c36d3fd8..b449f378f995 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -282,68 +282,8 @@ void __init parse_early_param(void);
282void __init parse_early_options(char *cmdline); 282void __init parse_early_options(char *cmdline);
283#endif /* __ASSEMBLY__ */ 283#endif /* __ASSEMBLY__ */
284 284
285/**
286 * module_init() - driver initialization entry point
287 * @x: function to be run at kernel boot time or module insertion
288 *
289 * module_init() will either be called during do_initcalls() (if
290 * builtin) or at module insertion time (if a module). There can only
291 * be one per module.
292 */
293#define module_init(x) __initcall(x);
294
295/**
296 * module_exit() - driver exit entry point
297 * @x: function to be run when driver is removed
298 *
299 * module_exit() will wrap the driver clean-up code
300 * with cleanup_module() when used with rmmod when
301 * the driver is a module. If the driver is statically
302 * compiled into the kernel, module_exit() has no effect.
303 * There can only be one per module.
304 */
305#define module_exit(x) __exitcall(x);
306
307#else /* MODULE */ 285#else /* MODULE */
308 286
309/*
310 * In most cases loadable modules do not need custom
311 * initcall levels. There are still some valid cases where
312 * a driver may be needed early if built in, and does not
313 * matter when built as a loadable module. Like bus
314 * snooping debug drivers.
315 */
316#define early_initcall(fn) module_init(fn)
317#define core_initcall(fn) module_init(fn)
318#define core_initcall_sync(fn) module_init(fn)
319#define postcore_initcall(fn) module_init(fn)
320#define postcore_initcall_sync(fn) module_init(fn)
321#define arch_initcall(fn) module_init(fn)
322#define subsys_initcall(fn) module_init(fn)
323#define subsys_initcall_sync(fn) module_init(fn)
324#define fs_initcall(fn) module_init(fn)
325#define fs_initcall_sync(fn) module_init(fn)
326#define rootfs_initcall(fn) module_init(fn)
327#define device_initcall(fn) module_init(fn)
328#define device_initcall_sync(fn) module_init(fn)
329#define late_initcall(fn) module_init(fn)
330#define late_initcall_sync(fn) module_init(fn)
331
332#define console_initcall(fn) module_init(fn)
333#define security_initcall(fn) module_init(fn)
334
335/* Each module must use one module_init(). */
336#define module_init(initfn) \
337 static inline initcall_t __inittest(void) \
338 { return initfn; } \
339 int init_module(void) __attribute__((alias(#initfn)));
340
341/* This is only required if you want to be unloadable. */
342#define module_exit(exitfn) \
343 static inline exitcall_t __exittest(void) \
344 { return exitfn; } \
345 void cleanup_module(void) __attribute__((alias(#exitfn)));
346
347#define __setup_param(str, unique_id, fn) /* nothing */ 287#define __setup_param(str, unique_id, fn) /* nothing */
348#define __setup(str, func) /* nothing */ 288#define __setup(str, func) /* nothing */
349#endif 289#endif
@@ -351,24 +291,6 @@ void __init parse_early_options(char *cmdline);
351/* Data marked not to be saved by software suspend */ 291/* Data marked not to be saved by software suspend */
352#define __nosavedata __section(.data..nosave) 292#define __nosavedata __section(.data..nosave)
353 293
354/* This means "can be init if no module support, otherwise module load
355 may call it." */
356#ifdef CONFIG_MODULES
357#define __init_or_module
358#define __initdata_or_module
359#define __initconst_or_module
360#define __INIT_OR_MODULE .text
361#define __INITDATA_OR_MODULE .data
362#define __INITRODATA_OR_MODULE .section ".rodata","a",%progbits
363#else
364#define __init_or_module __init
365#define __initdata_or_module __initdata
366#define __initconst_or_module __initconst
367#define __INIT_OR_MODULE __INIT
368#define __INITDATA_OR_MODULE __INITDATA
369#define __INITRODATA_OR_MODULE __INITRODATA
370#endif /*CONFIG_MODULES*/
371
372#ifdef MODULE 294#ifdef MODULE
373#define __exit_p(x) x 295#define __exit_p(x) x
374#else 296#else
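The kernel-doc removed above described module_init()/module_exit(); for reference, their canonical use in a driver looks like this (illustrative skeleton, not part of the patch):

    static int __init mydrv_init(void)
    {
            return 0;       /* runs at boot if built-in, or on insmod if a module */
    }

    static void __exit mydrv_exit(void)
    {
            /* runs on rmmod; no effect when built-in */
    }

    module_init(mydrv_init);
    module_exit(mydrv_exit);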
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index dc767f7c3704..f9c1b6d0f2e4 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -258,7 +258,7 @@ extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr,
258 void *data); 258 void *data);
259struct device *iommu_device_create(struct device *parent, void *drvdata, 259struct device *iommu_device_create(struct device *parent, void *drvdata,
260 const struct attribute_group **groups, 260 const struct attribute_group **groups,
261 const char *fmt, ...); 261 const char *fmt, ...) __printf(4, 5);
262void iommu_device_destroy(struct device *dev); 262void iommu_device_destroy(struct device *dev);
263int iommu_device_link(struct device *dev, struct device *link); 263int iommu_device_link(struct device *dev, struct device *link);
264void iommu_device_unlink(struct device *dev, struct device *link); 264void iommu_device_unlink(struct device *dev, struct device *link);
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 92188b0225bb..51744bcf74ee 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -484,6 +484,7 @@ extern int irq_chip_set_affinity_parent(struct irq_data *data,
484extern int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on); 484extern int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on);
485extern int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, 485extern int irq_chip_set_vcpu_affinity_parent(struct irq_data *data,
486 void *vcpu_info); 486 void *vcpu_info);
487extern int irq_chip_set_type_parent(struct irq_data *data, unsigned int type);
487#endif 488#endif
488 489
489/* Handling of unhandled and spurious interrupts: */ 490/* Handling of unhandled and spurious interrupts: */
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 5f0be58640ea..5582410727cb 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -411,7 +411,8 @@ extern __printf(3, 0)
411int vscnprintf(char *buf, size_t size, const char *fmt, va_list args); 411int vscnprintf(char *buf, size_t size, const char *fmt, va_list args);
412extern __printf(2, 3) 412extern __printf(2, 3)
413char *kasprintf(gfp_t gfp, const char *fmt, ...); 413char *kasprintf(gfp_t gfp, const char *fmt, ...);
414extern char *kvasprintf(gfp_t gfp, const char *fmt, va_list args); 414extern __printf(2, 0)
415char *kvasprintf(gfp_t gfp, const char *fmt, va_list args);
415 416
416extern __scanf(2, 3) 417extern __scanf(2, 3)
417int sscanf(const char *, const char *, ...); 418int sscanf(const char *, const char *, ...);
@@ -679,10 +680,10 @@ do { \
679 __ftrace_vprintk(_THIS_IP_, fmt, vargs); \ 680 __ftrace_vprintk(_THIS_IP_, fmt, vargs); \
680} while (0) 681} while (0)
681 682
682extern int 683extern __printf(2, 0) int
683__ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap); 684__ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap);
684 685
685extern int 686extern __printf(2, 0) int
686__ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap); 687__ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
687 688
688extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode); 689extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
@@ -702,7 +703,7 @@ int trace_printk(const char *fmt, ...)
702{ 703{
703 return 0; 704 return 0;
704} 705}
705static inline int 706static __printf(1, 0) inline int
706ftrace_vprintk(const char *fmt, va_list ap) 707ftrace_vprintk(const char *fmt, va_list ap)
707{ 708{
708 return 0; 709 return 0;
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index 2d61b909f414..637f67002c5a 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -80,8 +80,9 @@ struct kobject {
80 80
81extern __printf(2, 3) 81extern __printf(2, 3)
82int kobject_set_name(struct kobject *kobj, const char *name, ...); 82int kobject_set_name(struct kobject *kobj, const char *name, ...);
83extern int kobject_set_name_vargs(struct kobject *kobj, const char *fmt, 83extern __printf(2, 0)
84 va_list vargs); 84int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
85 va_list vargs);
85 86
86static inline const char *kobject_name(const struct kobject *kobj) 87static inline const char *kobject_name(const struct kobject *kobj)
87{ 88{
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 9564fd78c547..05e99b8ef465 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -734,6 +734,24 @@ static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
734 return false; 734 return false;
735} 735}
736#endif 736#endif
737#ifdef __KVM_HAVE_ARCH_ASSIGNED_DEVICE
738void kvm_arch_start_assignment(struct kvm *kvm);
739void kvm_arch_end_assignment(struct kvm *kvm);
740bool kvm_arch_has_assigned_device(struct kvm *kvm);
741#else
742static inline void kvm_arch_start_assignment(struct kvm *kvm)
743{
744}
745
746static inline void kvm_arch_end_assignment(struct kvm *kvm)
747{
748}
749
750static inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
751{
752 return false;
753}
754#endif
737 755
738static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu) 756static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
739{ 757{
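Going only by the declarations above, the new hooks appear intended to bracket device assignment and to let other arch code query whether any device is currently assigned; a purely speculative usage sketch (my_* names are made up):

    void my_assign_device(struct kvm *kvm /* , device handle ... */)
    {
            kvm_arch_start_assignment(kvm);
            /* ... attach the device to the guest ... */
    }

    void my_deassign_device(struct kvm *kvm /* , device handle ... */)
    {
            /* ... detach the device ... */
            kvm_arch_end_assignment(kvm);
    }

    /* elsewhere: if (kvm_arch_has_assigned_device(kvm)) take the conservative path */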
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 36ce37bcc963..c9cfbcdb8d14 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -431,6 +431,8 @@ enum {
431 ATA_HORKAGE_WD_BROKEN_LPM = (1 << 21), /* some WDs have broken LPM */ 431 ATA_HORKAGE_WD_BROKEN_LPM = (1 << 21), /* some WDs have broken LPM */
432 ATA_HORKAGE_ZERO_AFTER_TRIM = (1 << 22),/* guarantees zero after trim */ 432 ATA_HORKAGE_ZERO_AFTER_TRIM = (1 << 22),/* guarantees zero after trim */
433 ATA_HORKAGE_NO_NCQ_LOG = (1 << 23), /* don't use NCQ for log read */ 433 ATA_HORKAGE_NO_NCQ_LOG = (1 << 23), /* don't use NCQ for log read */
434 ATA_HORKAGE_NOTRIM = (1 << 24), /* don't use TRIM */
435 ATA_HORKAGE_MAX_SEC_1024 = (1 << 25), /* Limit max sects to 1024 */
434 436
435 /* DMA mask for user DMA control: User visible values; DO NOT 437 /* DMA mask for user DMA control: User visible values; DO NOT
436 renumber */ 438 renumber */
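Illustrative only: horkage bits such as the two added here are normally applied through libata's internal quirk table in libata-core.c; the entry format below is from memory and the device strings are invented.

    /* roughly how ata_device_blacklist[] entries look: model, firmware rev, flags */
    { "ExampleSSD Model*",  NULL,     ATA_HORKAGE_NOTRIM },
    { "ExampleHDD Model",   "FW1.0",  ATA_HORKAGE_MAX_SEC_1024 },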
diff --git a/include/linux/mfd/max77693-common.h b/include/linux/mfd/max77693-common.h
new file mode 100644
index 000000000000..095b121aa725
--- /dev/null
+++ b/include/linux/mfd/max77693-common.h
@@ -0,0 +1,49 @@
1/*
2 * Common data shared between Maxim 77693 and 77843 drivers
3 *
4 * Copyright (C) 2015 Samsung Electronics
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#ifndef __LINUX_MFD_MAX77693_COMMON_H
13#define __LINUX_MFD_MAX77693_COMMON_H
14
15enum max77693_types {
16 TYPE_MAX77693_UNKNOWN,
17 TYPE_MAX77693,
18 TYPE_MAX77843,
19
20 TYPE_MAX77693_NUM,
21};
22
23/*
24 * Shared also with max77843.
25 */
26struct max77693_dev {
27 struct device *dev;
28 struct i2c_client *i2c; /* 0xCC , PMIC, Charger, Flash LED */
29 struct i2c_client *i2c_muic; /* 0x4A , MUIC */
30 struct i2c_client *i2c_haptic; /* MAX77693: 0x90 , Haptic */
31 struct i2c_client *i2c_chg; /* MAX77843: 0xD2, Charger */
32
33 enum max77693_types type;
34
35 struct regmap *regmap;
36 struct regmap *regmap_muic;
37 struct regmap *regmap_haptic; /* Only MAX77693 */
38 struct regmap *regmap_chg; /* Only MAX77843 */
39
40 struct regmap_irq_chip_data *irq_data_led;
41 struct regmap_irq_chip_data *irq_data_topsys;
42 struct regmap_irq_chip_data *irq_data_chg; /* Only MAX77693 */
43 struct regmap_irq_chip_data *irq_data_muic;
44
45 int irq;
46};
47
48
49#endif /* __LINUX_MFD_MAX77693_COMMON_H */
diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h
index 51633ea6f910..3c7a63b98ad6 100644
--- a/include/linux/mfd/max77693-private.h
+++ b/include/linux/mfd/max77693-private.h
@@ -310,30 +310,30 @@ enum max77693_muic_reg {
310#define INTMASK2_CHGTYP_MASK (1 << INTMASK2_CHGTYP_SHIFT) 310#define INTMASK2_CHGTYP_MASK (1 << INTMASK2_CHGTYP_SHIFT)
311 311
312/* MAX77693 MUIC - STATUS1~3 Register */ 312/* MAX77693 MUIC - STATUS1~3 Register */
313#define STATUS1_ADC_SHIFT (0) 313#define MAX77693_STATUS1_ADC_SHIFT 0
314#define STATUS1_ADCLOW_SHIFT (5) 314#define MAX77693_STATUS1_ADCLOW_SHIFT 5
315#define STATUS1_ADCERR_SHIFT (6) 315#define MAX77693_STATUS1_ADCERR_SHIFT 6
316#define STATUS1_ADC1K_SHIFT (7) 316#define MAX77693_STATUS1_ADC1K_SHIFT 7
317#define STATUS1_ADC_MASK (0x1f << STATUS1_ADC_SHIFT) 317#define MAX77693_STATUS1_ADC_MASK (0x1f << MAX77693_STATUS1_ADC_SHIFT)
318#define STATUS1_ADCLOW_MASK (0x1 << STATUS1_ADCLOW_SHIFT) 318#define MAX77693_STATUS1_ADCLOW_MASK BIT(MAX77693_STATUS1_ADCLOW_SHIFT)
319#define STATUS1_ADCERR_MASK (0x1 << STATUS1_ADCERR_SHIFT) 319#define MAX77693_STATUS1_ADCERR_MASK BIT(MAX77693_STATUS1_ADCERR_SHIFT)
320#define STATUS1_ADC1K_MASK (0x1 << STATUS1_ADC1K_SHIFT) 320#define MAX77693_STATUS1_ADC1K_MASK BIT(MAX77693_STATUS1_ADC1K_SHIFT)
321 321
322#define STATUS2_CHGTYP_SHIFT (0) 322#define MAX77693_STATUS2_CHGTYP_SHIFT 0
323#define STATUS2_CHGDETRUN_SHIFT (3) 323#define MAX77693_STATUS2_CHGDETRUN_SHIFT 3
324#define STATUS2_DCDTMR_SHIFT (4) 324#define MAX77693_STATUS2_DCDTMR_SHIFT 4
325#define STATUS2_DXOVP_SHIFT (5) 325#define MAX77693_STATUS2_DXOVP_SHIFT 5
326#define STATUS2_VBVOLT_SHIFT (6) 326#define MAX77693_STATUS2_VBVOLT_SHIFT 6
327#define STATUS2_VIDRM_SHIFT (7) 327#define MAX77693_STATUS2_VIDRM_SHIFT 7
328#define STATUS2_CHGTYP_MASK (0x7 << STATUS2_CHGTYP_SHIFT) 328#define MAX77693_STATUS2_CHGTYP_MASK (0x7 << MAX77693_STATUS2_CHGTYP_SHIFT)
329#define STATUS2_CHGDETRUN_MASK (0x1 << STATUS2_CHGDETRUN_SHIFT) 329#define MAX77693_STATUS2_CHGDETRUN_MASK BIT(MAX77693_STATUS2_CHGDETRUN_SHIFT)
330#define STATUS2_DCDTMR_MASK (0x1 << STATUS2_DCDTMR_SHIFT) 330#define MAX77693_STATUS2_DCDTMR_MASK BIT(MAX77693_STATUS2_DCDTMR_SHIFT)
331#define STATUS2_DXOVP_MASK (0x1 << STATUS2_DXOVP_SHIFT) 331#define MAX77693_STATUS2_DXOVP_MASK BIT(MAX77693_STATUS2_DXOVP_SHIFT)
332#define STATUS2_VBVOLT_MASK (0x1 << STATUS2_VBVOLT_SHIFT) 332#define MAX77693_STATUS2_VBVOLT_MASK BIT(MAX77693_STATUS2_VBVOLT_SHIFT)
333#define STATUS2_VIDRM_MASK (0x1 << STATUS2_VIDRM_SHIFT) 333#define MAX77693_STATUS2_VIDRM_MASK BIT(MAX77693_STATUS2_VIDRM_SHIFT)
334 334
335#define STATUS3_OVP_SHIFT (2) 335#define MAX77693_STATUS3_OVP_SHIFT 2
336#define STATUS3_OVP_MASK (0x1 << STATUS3_OVP_SHIFT) 336#define MAX77693_STATUS3_OVP_MASK BIT(MAX77693_STATUS3_OVP_SHIFT)
337 337
338/* MAX77693 CDETCTRL1~2 register */ 338/* MAX77693 CDETCTRL1~2 register */
339#define CDETCTRL1_CHGDETEN_SHIFT (0) 339#define CDETCTRL1_CHGDETEN_SHIFT (0)
@@ -362,38 +362,38 @@ enum max77693_muic_reg {
362#define COMN1SW_MASK (0x7 << COMN1SW_SHIFT) 362#define COMN1SW_MASK (0x7 << COMN1SW_SHIFT)
363#define COMP2SW_MASK (0x7 << COMP2SW_SHIFT) 363#define COMP2SW_MASK (0x7 << COMP2SW_SHIFT)
364#define COMP_SW_MASK (COMP2SW_MASK | COMN1SW_MASK) 364#define COMP_SW_MASK (COMP2SW_MASK | COMN1SW_MASK)
365#define CONTROL1_SW_USB ((1 << COMP2SW_SHIFT) \ 365#define MAX77693_CONTROL1_SW_USB ((1 << COMP2SW_SHIFT) \
366 | (1 << COMN1SW_SHIFT)) 366 | (1 << COMN1SW_SHIFT))
367#define CONTROL1_SW_AUDIO ((2 << COMP2SW_SHIFT) \ 367#define MAX77693_CONTROL1_SW_AUDIO ((2 << COMP2SW_SHIFT) \
368 | (2 << COMN1SW_SHIFT)) 368 | (2 << COMN1SW_SHIFT))
369#define CONTROL1_SW_UART ((3 << COMP2SW_SHIFT) \ 369#define MAX77693_CONTROL1_SW_UART ((3 << COMP2SW_SHIFT) \
370 | (3 << COMN1SW_SHIFT)) 370 | (3 << COMN1SW_SHIFT))
371#define CONTROL1_SW_OPEN ((0 << COMP2SW_SHIFT) \ 371#define MAX77693_CONTROL1_SW_OPEN ((0 << COMP2SW_SHIFT) \
372 | (0 << COMN1SW_SHIFT)) 372 | (0 << COMN1SW_SHIFT))
373 373
374#define CONTROL2_LOWPWR_SHIFT (0) 374#define MAX77693_CONTROL2_LOWPWR_SHIFT 0
375#define CONTROL2_ADCEN_SHIFT (1) 375#define MAX77693_CONTROL2_ADCEN_SHIFT 1
376#define CONTROL2_CPEN_SHIFT (2) 376#define MAX77693_CONTROL2_CPEN_SHIFT 2
377#define CONTROL2_SFOUTASRT_SHIFT (3) 377#define MAX77693_CONTROL2_SFOUTASRT_SHIFT 3
378#define CONTROL2_SFOUTORD_SHIFT (4) 378#define MAX77693_CONTROL2_SFOUTORD_SHIFT 4
379#define CONTROL2_ACCDET_SHIFT (5) 379#define MAX77693_CONTROL2_ACCDET_SHIFT 5
380#define CONTROL2_USBCPINT_SHIFT (6) 380#define MAX77693_CONTROL2_USBCPINT_SHIFT 6
381#define CONTROL2_RCPS_SHIFT (7) 381#define MAX77693_CONTROL2_RCPS_SHIFT 7
382#define CONTROL2_LOWPWR_MASK (0x1 << CONTROL2_LOWPWR_SHIFT) 382#define MAX77693_CONTROL2_LOWPWR_MASK BIT(MAX77693_CONTROL2_LOWPWR_SHIFT)
383#define CONTROL2_ADCEN_MASK (0x1 << CONTROL2_ADCEN_SHIFT) 383#define MAX77693_CONTROL2_ADCEN_MASK BIT(MAX77693_CONTROL2_ADCEN_SHIFT)
384#define CONTROL2_CPEN_MASK (0x1 << CONTROL2_CPEN_SHIFT) 384#define MAX77693_CONTROL2_CPEN_MASK BIT(MAX77693_CONTROL2_CPEN_SHIFT)
385#define CONTROL2_SFOUTASRT_MASK (0x1 << CONTROL2_SFOUTASRT_SHIFT) 385#define MAX77693_CONTROL2_SFOUTASRT_MASK BIT(MAX77693_CONTROL2_SFOUTASRT_SHIFT)
386#define CONTROL2_SFOUTORD_MASK (0x1 << CONTROL2_SFOUTORD_SHIFT) 386#define MAX77693_CONTROL2_SFOUTORD_MASK BIT(MAX77693_CONTROL2_SFOUTORD_SHIFT)
387#define CONTROL2_ACCDET_MASK (0x1 << CONTROL2_ACCDET_SHIFT) 387#define MAX77693_CONTROL2_ACCDET_MASK BIT(MAX77693_CONTROL2_ACCDET_SHIFT)
388#define CONTROL2_USBCPINT_MASK (0x1 << CONTROL2_USBCPINT_SHIFT) 388#define MAX77693_CONTROL2_USBCPINT_MASK BIT(MAX77693_CONTROL2_USBCPINT_SHIFT)
389#define CONTROL2_RCPS_MASK (0x1 << CONTROL2_RCPS_SHIFT) 389#define MAX77693_CONTROL2_RCPS_MASK BIT(MAX77693_CONTROL2_RCPS_SHIFT)
390 390
391#define CONTROL3_JIGSET_SHIFT (0) 391#define MAX77693_CONTROL3_JIGSET_SHIFT 0
392#define CONTROL3_BTLDSET_SHIFT (2) 392#define MAX77693_CONTROL3_BTLDSET_SHIFT 2
393#define CONTROL3_ADCDBSET_SHIFT (4) 393#define MAX77693_CONTROL3_ADCDBSET_SHIFT 4
394#define CONTROL3_JIGSET_MASK (0x3 << CONTROL3_JIGSET_SHIFT) 394#define MAX77693_CONTROL3_JIGSET_MASK (0x3 << MAX77693_CONTROL3_JIGSET_SHIFT)
395#define CONTROL3_BTLDSET_MASK (0x3 << CONTROL3_BTLDSET_SHIFT) 395#define MAX77693_CONTROL3_BTLDSET_MASK (0x3 << MAX77693_CONTROL3_BTLDSET_SHIFT)
396#define CONTROL3_ADCDBSET_MASK (0x3 << CONTROL3_ADCDBSET_SHIFT) 396#define MAX77693_CONTROL3_ADCDBSET_MASK (0x3 << MAX77693_CONTROL3_ADCDBSET_SHIFT)
397 397
398/* Slave addr = 0x90: Haptic */ 398/* Slave addr = 0x90: Haptic */
399enum max77693_haptic_reg { 399enum max77693_haptic_reg {
@@ -529,36 +529,4 @@ enum max77693_irq_muic {
529 MAX77693_MUIC_IRQ_NR, 529 MAX77693_MUIC_IRQ_NR,
530}; 530};
531 531
532struct max77693_dev {
533 struct device *dev;
534 struct i2c_client *i2c; /* 0xCC , PMIC, Charger, Flash LED */
535 struct i2c_client *muic; /* 0x4A , MUIC */
536 struct i2c_client *haptic; /* 0x90 , Haptic */
537
538 int type;
539
540 struct regmap *regmap;
541 struct regmap *regmap_muic;
542 struct regmap *regmap_haptic;
543
544 struct regmap_irq_chip_data *irq_data_led;
545 struct regmap_irq_chip_data *irq_data_topsys;
546 struct regmap_irq_chip_data *irq_data_charger;
547 struct regmap_irq_chip_data *irq_data_muic;
548
549 int irq;
550 int irq_gpio;
551 struct mutex irqlock;
552 int irq_masks_cur[MAX77693_IRQ_GROUP_NR];
553 int irq_masks_cache[MAX77693_IRQ_GROUP_NR];
554};
555
556enum max77693_types {
557 TYPE_MAX77693,
558};
559
560extern int max77693_irq_init(struct max77693_dev *max77686);
561extern void max77693_irq_exit(struct max77693_dev *max77686);
562extern int max77693_irq_resume(struct max77693_dev *max77686);
563
564#endif /* __LINUX_MFD_MAX77693_PRIV_H */ 532#endif /* __LINUX_MFD_MAX77693_PRIV_H */
diff --git a/include/linux/mfd/max77843-private.h b/include/linux/mfd/max77843-private.h
index 7178ace8379e..c19303b0ccfd 100644
--- a/include/linux/mfd/max77843-private.h
+++ b/include/linux/mfd/max77843-private.h
@@ -318,62 +318,62 @@ enum max77843_irq_muic {
318 MAX77843_INTSRCMASK_SYS_MASK | MAX77843_INTSRCMASK_CHGR_MASK) 318 MAX77843_INTSRCMASK_SYS_MASK | MAX77843_INTSRCMASK_CHGR_MASK)
319 319
320/* MAX77843 STATUS register*/ 320/* MAX77843 STATUS register*/
321#define STATUS1_ADC_SHIFT 0 321#define MAX77843_MUIC_STATUS1_ADC_SHIFT 0
322#define STATUS1_ADCERROR_SHIFT 6 322#define MAX77843_MUIC_STATUS1_ADCERROR_SHIFT 6
323#define STATUS1_ADC1K_SHIFT 7 323#define MAX77843_MUIC_STATUS1_ADC1K_SHIFT 7
324#define STATUS2_CHGTYP_SHIFT 0 324#define MAX77843_MUIC_STATUS2_CHGTYP_SHIFT 0
325#define STATUS2_CHGDETRUN_SHIFT 3 325#define MAX77843_MUIC_STATUS2_CHGDETRUN_SHIFT 3
326#define STATUS2_DCDTMR_SHIFT 4 326#define MAX77843_MUIC_STATUS2_DCDTMR_SHIFT 4
327#define STATUS2_DXOVP_SHIFT 5 327#define MAX77843_MUIC_STATUS2_DXOVP_SHIFT 5
328#define STATUS2_VBVOLT_SHIFT 6 328#define MAX77843_MUIC_STATUS2_VBVOLT_SHIFT 6
329#define STATUS3_VBADC_SHIFT 0 329#define MAX77843_MUIC_STATUS3_VBADC_SHIFT 0
330#define STATUS3_VDNMON_SHIFT 4 330#define MAX77843_MUIC_STATUS3_VDNMON_SHIFT 4
331#define STATUS3_DNRES_SHIFT 5 331#define MAX77843_MUIC_STATUS3_DNRES_SHIFT 5
332#define STATUS3_MPNACK_SHIFT 6 332#define MAX77843_MUIC_STATUS3_MPNACK_SHIFT 6
333 333
334#define MAX77843_MUIC_STATUS1_ADC_MASK (0x1f << STATUS1_ADC_SHIFT) 334#define MAX77843_MUIC_STATUS1_ADC_MASK (0x1f << MAX77843_MUIC_STATUS1_ADC_SHIFT)
335#define MAX77843_MUIC_STATUS1_ADCERROR_MASK BIT(STATUS1_ADCERROR_SHIFT) 335#define MAX77843_MUIC_STATUS1_ADCERROR_MASK BIT(MAX77843_MUIC_STATUS1_ADCERROR_SHIFT)
336#define MAX77843_MUIC_STATUS1_ADC1K_MASK BIT(STATUS1_ADC1K_SHIFT) 336#define MAX77843_MUIC_STATUS1_ADC1K_MASK BIT(MAX77843_MUIC_STATUS1_ADC1K_SHIFT)
337#define MAX77843_MUIC_STATUS2_CHGTYP_MASK (0x7 << STATUS2_CHGTYP_SHIFT) 337#define MAX77843_MUIC_STATUS2_CHGTYP_MASK (0x7 << MAX77843_MUIC_STATUS2_CHGTYP_SHIFT)
338#define MAX77843_MUIC_STATUS2_CHGDETRUN_MASK BIT(STATUS2_CHGDETRUN_SHIFT) 338#define MAX77843_MUIC_STATUS2_CHGDETRUN_MASK BIT(MAX77843_MUIC_STATUS2_CHGDETRUN_SHIFT)
339#define MAX77843_MUIC_STATUS2_DCDTMR_MASK BIT(STATUS2_DCDTMR_SHIFT) 339#define MAX77843_MUIC_STATUS2_DCDTMR_MASK BIT(MAX77843_MUIC_STATUS2_DCDTMR_SHIFT)
340#define MAX77843_MUIC_STATUS2_DXOVP_MASK BIT(STATUS2_DXOVP_SHIFT) 340#define MAX77843_MUIC_STATUS2_DXOVP_MASK BIT(MAX77843_MUIC_STATUS2_DXOVP_SHIFT)
341#define MAX77843_MUIC_STATUS2_VBVOLT_MASK BIT(STATUS2_VBVOLT_SHIFT) 341#define MAX77843_MUIC_STATUS2_VBVOLT_MASK BIT(MAX77843_MUIC_STATUS2_VBVOLT_SHIFT)
342#define MAX77843_MUIC_STATUS3_VBADC_MASK (0xf << STATUS3_VBADC_SHIFT) 342#define MAX77843_MUIC_STATUS3_VBADC_MASK (0xf << MAX77843_MUIC_STATUS3_VBADC_SHIFT)
343#define MAX77843_MUIC_STATUS3_VDNMON_MASK BIT(STATUS3_VDNMON_SHIFT) 343#define MAX77843_MUIC_STATUS3_VDNMON_MASK BIT(MAX77843_MUIC_STATUS3_VDNMON_SHIFT)
344#define MAX77843_MUIC_STATUS3_DNRES_MASK BIT(STATUS3_DNRES_SHIFT) 344#define MAX77843_MUIC_STATUS3_DNRES_MASK BIT(MAX77843_MUIC_STATUS3_DNRES_SHIFT)
345#define MAX77843_MUIC_STATUS3_MPNACK_MASK BIT(STATUS3_MPNACK_SHIFT) 345#define MAX77843_MUIC_STATUS3_MPNACK_MASK BIT(MAX77843_MUIC_STATUS3_MPNACK_SHIFT)
346 346
347/* MAX77843 CONTROL register */ 347/* MAX77843 CONTROL register */
348#define CONTROL1_COMP1SW_SHIFT 0 348#define MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT 0
349#define CONTROL1_COMP2SW_SHIFT 3 349#define MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT 3
350#define CONTROL1_IDBEN_SHIFT 7 350#define MAX77843_MUIC_CONTROL1_IDBEN_SHIFT 7
351#define CONTROL2_LOWPWR_SHIFT 0 351#define MAX77843_MUIC_CONTROL2_LOWPWR_SHIFT 0
352#define CONTROL2_ADCEN_SHIFT 1 352#define MAX77843_MUIC_CONTROL2_ADCEN_SHIFT 1
353#define CONTROL2_CPEN_SHIFT 2 353#define MAX77843_MUIC_CONTROL2_CPEN_SHIFT 2
354#define CONTROL2_ACC_DET_SHIFT 5 354#define MAX77843_MUIC_CONTROL2_ACC_DET_SHIFT 5
355#define CONTROL2_USBCPINT_SHIFT 6 355#define MAX77843_MUIC_CONTROL2_USBCPINT_SHIFT 6
356#define CONTROL2_RCPS_SHIFT 7 356#define MAX77843_MUIC_CONTROL2_RCPS_SHIFT 7
357#define CONTROL3_JIGSET_SHIFT 0 357#define MAX77843_MUIC_CONTROL3_JIGSET_SHIFT 0
358#define CONTROL4_ADCDBSET_SHIFT 0 358#define MAX77843_MUIC_CONTROL4_ADCDBSET_SHIFT 0
359#define CONTROL4_USBAUTO_SHIFT 4 359#define MAX77843_MUIC_CONTROL4_USBAUTO_SHIFT 4
360#define CONTROL4_FCTAUTO_SHIFT 5 360#define MAX77843_MUIC_CONTROL4_FCTAUTO_SHIFT 5
361#define CONTROL4_ADCMODE_SHIFT 6 361#define MAX77843_MUIC_CONTROL4_ADCMODE_SHIFT 6
362 362
363#define MAX77843_MUIC_CONTROL1_COMP1SW_MASK (0x7 << CONTROL1_COMP1SW_SHIFT) 363#define MAX77843_MUIC_CONTROL1_COMP1SW_MASK (0x7 << MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT)
364#define MAX77843_MUIC_CONTROL1_COMP2SW_MASK (0x7 << CONTROL1_COMP2SW_SHIFT) 364#define MAX77843_MUIC_CONTROL1_COMP2SW_MASK (0x7 << MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT)
365#define MAX77843_MUIC_CONTROL1_IDBEN_MASK BIT(CONTROL1_IDBEN_SHIFT) 365#define MAX77843_MUIC_CONTROL1_IDBEN_MASK BIT(MAX77843_MUIC_CONTROL1_IDBEN_SHIFT)
366#define MAX77843_MUIC_CONTROL2_LOWPWR_MASK BIT(CONTROL2_LOWPWR_SHIFT) 366#define MAX77843_MUIC_CONTROL2_LOWPWR_MASK BIT(MAX77843_MUIC_CONTROL2_LOWPWR_SHIFT)
367#define MAX77843_MUIC_CONTROL2_ADCEN_MASK BIT(CONTROL2_ADCEN_SHIFT) 367#define MAX77843_MUIC_CONTROL2_ADCEN_MASK BIT(MAX77843_MUIC_CONTROL2_ADCEN_SHIFT)
368#define MAX77843_MUIC_CONTROL2_CPEN_MASK BIT(CONTROL2_CPEN_SHIFT) 368#define MAX77843_MUIC_CONTROL2_CPEN_MASK BIT(MAX77843_MUIC_CONTROL2_CPEN_SHIFT)
369#define MAX77843_MUIC_CONTROL2_ACC_DET_MASK BIT(CONTROL2_ACC_DET_SHIFT) 369#define MAX77843_MUIC_CONTROL2_ACC_DET_MASK BIT(MAX77843_MUIC_CONTROL2_ACC_DET_SHIFT)
370#define MAX77843_MUIC_CONTROL2_USBCPINT_MASK BIT(CONTROL2_USBCPINT_SHIFT) 370#define MAX77843_MUIC_CONTROL2_USBCPINT_MASK BIT(MAX77843_MUIC_CONTROL2_USBCPINT_SHIFT)
371#define MAX77843_MUIC_CONTROL2_RCPS_MASK BIT(CONTROL2_RCPS_SHIFT) 371#define MAX77843_MUIC_CONTROL2_RCPS_MASK BIT(MAX77843_MUIC_CONTROL2_RCPS_SHIFT)
372#define MAX77843_MUIC_CONTROL3_JIGSET_MASK (0x3 << CONTROL3_JIGSET_SHIFT) 372#define MAX77843_MUIC_CONTROL3_JIGSET_MASK (0x3 << MAX77843_MUIC_CONTROL3_JIGSET_SHIFT)
373#define MAX77843_MUIC_CONTROL4_ADCDBSET_MASK (0x3 << CONTROL4_ADCDBSET_SHIFT) 373#define MAX77843_MUIC_CONTROL4_ADCDBSET_MASK (0x3 << MAX77843_MUIC_CONTROL4_ADCDBSET_SHIFT)
374#define MAX77843_MUIC_CONTROL4_USBAUTO_MASK BIT(CONTROL4_USBAUTO_SHIFT) 374#define MAX77843_MUIC_CONTROL4_USBAUTO_MASK BIT(MAX77843_MUIC_CONTROL4_USBAUTO_SHIFT)
375#define MAX77843_MUIC_CONTROL4_FCTAUTO_MASK BIT(CONTROL4_FCTAUTO_SHIFT) 375#define MAX77843_MUIC_CONTROL4_FCTAUTO_MASK BIT(MAX77843_MUIC_CONTROL4_FCTAUTO_SHIFT)
376#define MAX77843_MUIC_CONTROL4_ADCMODE_MASK (0x3 << CONTROL4_ADCMODE_SHIFT) 376#define MAX77843_MUIC_CONTROL4_ADCMODE_MASK (0x3 << MAX77843_MUIC_CONTROL4_ADCMODE_SHIFT)
377 377
378/* MAX77843 switch port */ 378/* MAX77843 switch port */
379#define COM_OPEN 0 379#define COM_OPEN 0
@@ -383,38 +383,38 @@ enum max77843_irq_muic {
383#define COM_AUX_USB 4 383#define COM_AUX_USB 4
384#define COM_AUX_UART 5 384#define COM_AUX_UART 5
385 385
386#define CONTROL1_COM_SW \ 386#define MAX77843_MUIC_CONTROL1_COM_SW \
387 ((MAX77843_MUIC_CONTROL1_COMP1SW_MASK | \ 387 ((MAX77843_MUIC_CONTROL1_COMP1SW_MASK | \
388 MAX77843_MUIC_CONTROL1_COMP2SW_MASK)) 388 MAX77843_MUIC_CONTROL1_COMP2SW_MASK))
389 389
390#define CONTROL1_SW_OPEN \ 390#define MAX77843_MUIC_CONTROL1_SW_OPEN \
391 ((COM_OPEN << CONTROL1_COMP1SW_SHIFT | \ 391 ((COM_OPEN << MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT | \
392 COM_OPEN << CONTROL1_COMP2SW_SHIFT)) 392 COM_OPEN << MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT))
393#define CONTROL1_SW_USB \ 393#define MAX77843_MUIC_CONTROL1_SW_USB \
394 ((COM_USB << CONTROL1_COMP1SW_SHIFT | \ 394 ((COM_USB << MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT | \
395 COM_USB << CONTROL1_COMP2SW_SHIFT)) 395 COM_USB << MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT))
396#define CONTROL1_SW_AUDIO \ 396#define MAX77843_MUIC_CONTROL1_SW_AUDIO \
397 ((COM_AUDIO << CONTROL1_COMP1SW_SHIFT | \ 397 ((COM_AUDIO << MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT | \
398 COM_AUDIO << CONTROL1_COMP2SW_SHIFT)) 398 COM_AUDIO << MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT))
399#define CONTROL1_SW_UART \ 399#define MAX77843_MUIC_CONTROL1_SW_UART \
400 ((COM_UART << CONTROL1_COMP1SW_SHIFT | \ 400 ((COM_UART << MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT | \
401 COM_UART << CONTROL1_COMP2SW_SHIFT)) 401 COM_UART << MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT))
402#define CONTROL1_SW_AUX_USB \ 402#define MAX77843_MUIC_CONTROL1_SW_AUX_USB \
403 ((COM_AUX_USB << CONTROL1_COMP1SW_SHIFT | \ 403 ((COM_AUX_USB << MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT | \
404 COM_AUX_USB << CONTROL1_COMP2SW_SHIFT)) 404 COM_AUX_USB << MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT))
405#define CONTROL1_SW_AUX_UART \ 405#define MAX77843_MUIC_CONTROL1_SW_AUX_UART \
406 ((COM_AUX_UART << CONTROL1_COMP1SW_SHIFT | \ 406 ((COM_AUX_UART << MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT | \
407 COM_AUX_UART << CONTROL1_COMP2SW_SHIFT)) 407 COM_AUX_UART << MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT))
408 408
409#define MAX77843_DISABLE 0 409#define MAX77843_DISABLE 0
410#define MAX77843_ENABLE 1 410#define MAX77843_ENABLE 1
411 411
412#define CONTROL4_AUTO_DISABLE \ 412#define CONTROL4_AUTO_DISABLE \
413 ((MAX77843_DISABLE << CONTROL4_USBAUTO_SHIFT) | \ 413 ((MAX77843_DISABLE << MAX77843_MUIC_CONTROL4_USBAUTO_SHIFT) | \
414 (MAX77843_DISABLE << CONTROL4_FCTAUTO_SHIFT)) 414 (MAX77843_DISABLE << MAX77843_MUIC_CONTROL4_FCTAUTO_SHIFT))
415#define CONTROL4_AUTO_ENABLE \ 415#define CONTROL4_AUTO_ENABLE \
416 ((MAX77843_ENABLE << CONTROL4_USBAUTO_SHIFT) | \ 416 ((MAX77843_ENABLE << MAX77843_MUIC_CONTROL4_USBAUTO_SHIFT) | \
417 (MAX77843_ENABLE << CONTROL4_FCTAUTO_SHIFT)) 417 (MAX77843_ENABLE << MAX77843_MUIC_CONTROL4_FCTAUTO_SHIFT))
418 418
419/* MAX77843 SAFEOUT LDO Control register */ 419/* MAX77843 SAFEOUT LDO Control register */
420#define SAFEOUTCTRL_SAFEOUT1_SHIFT 0 420#define SAFEOUTCTRL_SAFEOUT1_SHIFT 0
@@ -431,24 +431,4 @@ enum max77843_irq_muic {
431#define MAX77843_REG_SAFEOUTCTRL_SAFEOUT2_MASK \ 431#define MAX77843_REG_SAFEOUTCTRL_SAFEOUT2_MASK \
432 (0x3 << SAFEOUTCTRL_SAFEOUT2_SHIFT) 432 (0x3 << SAFEOUTCTRL_SAFEOUT2_SHIFT)
433 433
434struct max77843 {
435 struct device *dev;
436
437 struct i2c_client *i2c;
438 struct i2c_client *i2c_chg;
439 struct i2c_client *i2c_fuel;
440 struct i2c_client *i2c_muic;
441
442 struct regmap *regmap;
443 struct regmap *regmap_chg;
444 struct regmap *regmap_fuel;
445 struct regmap *regmap_muic;
446
447 struct regmap_irq_chip_data *irq_data;
448 struct regmap_irq_chip_data *irq_data_chg;
449 struct regmap_irq_chip_data *irq_data_fuel;
450 struct regmap_irq_chip_data *irq_data_muic;
451
452 int irq;
453};
454#endif /* __MAX77843_H__ */ 434#endif /* __MAX77843_H__ */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 2e872f92dbac..bf6f117fcf4d 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1003,6 +1003,34 @@ static inline int page_mapped(struct page *page)
1003} 1003}
1004 1004
1005/* 1005/*
1006 * Return true only if the page has been allocated with
1007 * ALLOC_NO_WATERMARKS and the low watermark was not
1008 * met implying that the system is under some pressure.
1009 */
1010static inline bool page_is_pfmemalloc(struct page *page)
1011{
1012 /*
1013 * Page index cannot be this large so this must be
1014 * a pfmemalloc page.
1015 */
1016 return page->index == -1UL;
1017}
1018
1019/*
1020 * Only to be called by the page allocator on a freshly allocated
1021 * page.
1022 */
1023static inline void set_page_pfmemalloc(struct page *page)
1024{
1025 page->index = -1UL;
1026}
1027
1028static inline void clear_page_pfmemalloc(struct page *page)
1029{
1030 page->index = 0;
1031}
1032
1033/*
1006 * Different kinds of faults, as returned by handle_mm_fault(). 1034 * Different kinds of faults, as returned by handle_mm_fault().
1007 * Used to decide whether a process gets delivered SIGBUS or 1035 * Used to decide whether a process gets delivered SIGBUS or
1008 * just gets major/minor fault counters bumped up. 1036 * just gets major/minor fault counters bumped up.
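The three mm.h helpers added above encode pfmemalloc state in page->index rather than in a dedicated struct page field: the allocator writes the sentinel -1UL when the page came from the emergency reserves, and page_is_pfmemalloc() just compares against that sentinel. The standalone C sketch below mirrors that idea with a stub struct page of its own (all names here are illustrative stand-ins, not the kernel definitions):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the kernel's struct page. */
struct page {
	unsigned long index;		/* doubles as the pfmemalloc marker */
};

/* Allocator side: mark a freshly allocated emergency-reserve page. */
static void set_page_pfmemalloc(struct page *page)
{
	page->index = -1UL;
}

static void clear_page_pfmemalloc(struct page *page)
{
	page->index = 0;
}

/* Consumer side: a real mapping index can never be -1UL, so the
 * sentinel is unambiguous. */
static bool page_is_pfmemalloc(const struct page *page)
{
	return page->index == -1UL;
}

int main(void)
{
	struct page p = { .index = 0 };

	set_page_pfmemalloc(&p);
	printf("pfmemalloc: %d\n", page_is_pfmemalloc(&p));	/* prints 1 */
	clear_page_pfmemalloc(&p);
	printf("pfmemalloc: %d\n", page_is_pfmemalloc(&p));	/* prints 0 */
	return 0;
}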
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 0038ac7466fd..15549578d559 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -63,15 +63,6 @@ struct page {
63 union { 63 union {
64 pgoff_t index; /* Our offset within mapping. */ 64 pgoff_t index; /* Our offset within mapping. */
65 void *freelist; /* sl[aou]b first free object */ 65 void *freelist; /* sl[aou]b first free object */
66 bool pfmemalloc; /* If set by the page allocator,
67 * ALLOC_NO_WATERMARKS was set
68 * and the low watermark was not
69 * met implying that the system
70 * is under some pressure. The
71 * caller should try ensure
72 * this page is only used to
73 * free other pages.
74 */
75 }; 66 };
76 67
77 union { 68 union {
diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
index c5d52780d6a0..3ba327af055c 100644
--- a/include/linux/mmiotrace.h
+++ b/include/linux/mmiotrace.h
@@ -106,6 +106,6 @@ extern void enable_mmiotrace(void);
106extern void disable_mmiotrace(void); 106extern void disable_mmiotrace(void);
107extern void mmio_trace_rw(struct mmiotrace_rw *rw); 107extern void mmio_trace_rw(struct mmiotrace_rw *rw);
108extern void mmio_trace_mapping(struct mmiotrace_map *map); 108extern void mmio_trace_mapping(struct mmiotrace_map *map);
109extern int mmio_trace_printk(const char *fmt, va_list args); 109extern __printf(1, 0) int mmio_trace_printk(const char *fmt, va_list args);
110 110
111#endif /* _LINUX_MMIOTRACE_H */ 111#endif /* _LINUX_MMIOTRACE_H */
diff --git a/include/linux/module.h b/include/linux/module.h
index d67b1932cc59..3a19c79918e0 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -11,6 +11,7 @@
11#include <linux/compiler.h> 11#include <linux/compiler.h>
12#include <linux/cache.h> 12#include <linux/cache.h>
13#include <linux/kmod.h> 13#include <linux/kmod.h>
14#include <linux/init.h>
14#include <linux/elf.h> 15#include <linux/elf.h>
15#include <linux/stringify.h> 16#include <linux/stringify.h>
16#include <linux/kobject.h> 17#include <linux/kobject.h>
@@ -71,6 +72,89 @@ extern struct module_attribute module_uevent;
71extern int init_module(void); 72extern int init_module(void);
72extern void cleanup_module(void); 73extern void cleanup_module(void);
73 74
75#ifndef MODULE
76/**
77 * module_init() - driver initialization entry point
78 * @x: function to be run at kernel boot time or module insertion
79 *
80 * module_init() will either be called during do_initcalls() (if
81 * builtin) or at module insertion time (if a module). There can only
82 * be one per module.
83 */
84#define module_init(x) __initcall(x);
85
86/**
87 * module_exit() - driver exit entry point
88 * @x: function to be run when driver is removed
89 *
90 * module_exit() will wrap the driver clean-up code
91 * with cleanup_module() when used with rmmod when
92 * the driver is a module. If the driver is statically
93 * compiled into the kernel, module_exit() has no effect.
94 * There can only be one per module.
95 */
96#define module_exit(x) __exitcall(x);
97
98#else /* MODULE */
99
100/*
101 * In most cases loadable modules do not need custom
102 * initcall levels. There are still some valid cases where
103 * a driver may be needed early if built in, and does not
104 * matter when built as a loadable module. Like bus
105 * snooping debug drivers.
106 */
107#define early_initcall(fn) module_init(fn)
108#define core_initcall(fn) module_init(fn)
109#define core_initcall_sync(fn) module_init(fn)
110#define postcore_initcall(fn) module_init(fn)
111#define postcore_initcall_sync(fn) module_init(fn)
112#define arch_initcall(fn) module_init(fn)
113#define subsys_initcall(fn) module_init(fn)
114#define subsys_initcall_sync(fn) module_init(fn)
115#define fs_initcall(fn) module_init(fn)
116#define fs_initcall_sync(fn) module_init(fn)
117#define rootfs_initcall(fn) module_init(fn)
118#define device_initcall(fn) module_init(fn)
119#define device_initcall_sync(fn) module_init(fn)
120#define late_initcall(fn) module_init(fn)
121#define late_initcall_sync(fn) module_init(fn)
122
123#define console_initcall(fn) module_init(fn)
124#define security_initcall(fn) module_init(fn)
125
126/* Each module must use one module_init(). */
127#define module_init(initfn) \
128 static inline initcall_t __inittest(void) \
129 { return initfn; } \
130 int init_module(void) __attribute__((alias(#initfn)));
131
132/* This is only required if you want to be unloadable. */
133#define module_exit(exitfn) \
134 static inline exitcall_t __exittest(void) \
135 { return exitfn; } \
136 void cleanup_module(void) __attribute__((alias(#exitfn)));
137
138#endif
139
140/* This means "can be init if no module support, otherwise module load
141 may call it." */
142#ifdef CONFIG_MODULES
143#define __init_or_module
144#define __initdata_or_module
145#define __initconst_or_module
146#define __INIT_OR_MODULE .text
147#define __INITDATA_OR_MODULE .data
148#define __INITRODATA_OR_MODULE .section ".rodata","a",%progbits
149#else
150#define __init_or_module __init
151#define __initdata_or_module __initdata
152#define __initconst_or_module __initconst
153#define __INIT_OR_MODULE __INIT
154#define __INITDATA_OR_MODULE __INITDATA
155#define __INITRODATA_OR_MODULE __INITRODATA
156#endif /*CONFIG_MODULES*/
157
74/* Archs provide a method of finding the correct exception table. */ 158/* Archs provide a method of finding the correct exception table. */
75struct exception_table_entry; 159struct exception_table_entry;
76 160
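For reference, the module_init()/module_exit() pair documented in the hunk above is used the same way whether the code ends up built in or loaded as a module; only the macro expansion differs. A minimal skeleton (driver name and messages are hypothetical, shown purely to illustrate the two entry points):

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>

static int __init example_init(void)
{
	pr_info("example: loaded\n");
	return 0;			/* non-zero aborts the load */
}

static void __exit example_exit(void)
{
	pr_info("example: unloaded\n");
}

module_init(example_init);
module_exit(example_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Minimal module_init()/module_exit() skeleton");

When built in, example_init() runs from do_initcalls() and the exit path is discarded; when built as a module, the macros alias the two functions to init_module()/cleanup_module() exactly as the MODULE branch above shows.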
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index f25e2bdd188c..272f42952f34 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -178,17 +178,17 @@ typedef enum {
178/* Chip may not exist, so silence any errors in scan */ 178/* Chip may not exist, so silence any errors in scan */
179#define NAND_SCAN_SILENT_NODEV 0x00040000 179#define NAND_SCAN_SILENT_NODEV 0x00040000
180/* 180/*
181 * This option could be defined by controller drivers to protect against
182 * kmap'ed, vmalloc'ed highmem buffers being passed from upper layers
183 */
184#define NAND_USE_BOUNCE_BUFFER 0x00080000
185/*
186 * Autodetect nand buswidth with readid/onfi. 181 * Autodetect nand buswidth with readid/onfi.
187 * This supposes the driver will configure the hardware in 8 bits mode 182 * This supposes the driver will configure the hardware in 8 bits mode
188 * when calling nand_scan_ident, and update its configuration 183 * when calling nand_scan_ident, and update its configuration
189 * before calling nand_scan_tail. 184 * before calling nand_scan_tail.
190 */ 185 */
191#define NAND_BUSWIDTH_AUTO 0x00080000 186#define NAND_BUSWIDTH_AUTO 0x00080000
187/*
188 * This option could be defined by controller drivers to protect against
189 * kmap'ed, vmalloc'ed highmem buffers being passed from upper layers
190 */
191#define NAND_USE_BOUNCE_BUFFER 0x00100000
192 192
193/* Options set by nand scan */ 193/* Options set by nand scan */
194/* Nand scan has allocated controller struct */ 194/* Nand scan has allocated controller struct */
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index f91b5ade30c9..874b77228fb9 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -292,9 +292,12 @@ static inline void nfs_mark_for_revalidate(struct inode *inode)
292 struct nfs_inode *nfsi = NFS_I(inode); 292 struct nfs_inode *nfsi = NFS_I(inode);
293 293
294 spin_lock(&inode->i_lock); 294 spin_lock(&inode->i_lock);
295 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS; 295 nfsi->cache_validity |= NFS_INO_INVALID_ATTR |
296 NFS_INO_REVAL_PAGECACHE |
297 NFS_INO_INVALID_ACCESS |
298 NFS_INO_INVALID_ACL;
296 if (S_ISDIR(inode->i_mode)) 299 if (S_ISDIR(inode->i_mode))
297 nfsi->cache_validity |= NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA; 300 nfsi->cache_validity |= NFS_INO_INVALID_DATA;
298 spin_unlock(&inode->i_lock); 301 spin_unlock(&inode->i_lock);
299} 302}
300 303
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index a2ea1491d3df..20bc8e51b161 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -220,7 +220,7 @@ struct nfs_server {
220#define NFS_CAP_SYMLINKS (1U << 2) 220#define NFS_CAP_SYMLINKS (1U << 2)
221#define NFS_CAP_ACLS (1U << 3) 221#define NFS_CAP_ACLS (1U << 3)
222#define NFS_CAP_ATOMIC_OPEN (1U << 4) 222#define NFS_CAP_ATOMIC_OPEN (1U << 4)
223#define NFS_CAP_CHANGE_ATTR (1U << 5) 223/* #define NFS_CAP_CHANGE_ATTR (1U << 5) */
224#define NFS_CAP_FILEID (1U << 6) 224#define NFS_CAP_FILEID (1U << 6)
225#define NFS_CAP_MODE (1U << 7) 225#define NFS_CAP_MODE (1U << 7)
226#define NFS_CAP_NLINK (1U << 8) 226#define NFS_CAP_NLINK (1U << 8)
diff --git a/include/linux/of_device.h b/include/linux/of_device.h
index 4c508549833a..cc7dd687a89d 100644
--- a/include/linux/of_device.h
+++ b/include/linux/of_device.h
@@ -59,7 +59,7 @@ void of_dma_configure(struct device *dev, struct device_node *np);
59#else /* CONFIG_OF */ 59#else /* CONFIG_OF */
60 60
61static inline int of_driver_match_device(struct device *dev, 61static inline int of_driver_match_device(struct device *dev,
62 struct device_driver *drv) 62 const struct device_driver *drv)
63{ 63{
64 return 0; 64 return 0;
65} 65}
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index f34e040b34e9..41c93844fb1d 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -631,15 +631,19 @@ static inline void ClearPageSlabPfmemalloc(struct page *page)
631 1 << PG_private | 1 << PG_private_2 | \ 631 1 << PG_private | 1 << PG_private_2 | \
632 1 << PG_writeback | 1 << PG_reserved | \ 632 1 << PG_writeback | 1 << PG_reserved | \
633 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \ 633 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \
634 1 << PG_unevictable | __PG_MLOCKED | __PG_HWPOISON | \ 634 1 << PG_unevictable | __PG_MLOCKED | \
635 __PG_COMPOUND_LOCK) 635 __PG_COMPOUND_LOCK)
636 636
637/* 637/*
638 * Flags checked when a page is prepped for return by the page allocator. 638 * Flags checked when a page is prepped for return by the page allocator.
639 * Pages being prepped should not have any flags set. If they are set, 639 * Pages being prepped should not have these flags set. If they are set,
640 * there has been a kernel bug or struct page corruption. 640 * there has been a kernel bug or struct page corruption.
641 *
642 * __PG_HWPOISON is exceptional because it needs to be kept beyond page's
643 * alloc-free cycle to prevent from reusing the page.
641 */ 644 */
642#define PAGE_FLAGS_CHECK_AT_PREP ((1 << NR_PAGEFLAGS) - 1) 645#define PAGE_FLAGS_CHECK_AT_PREP \
646 (((1 << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON)
643 647
644#define PAGE_FLAGS_PRIVATE \ 648#define PAGE_FLAGS_PRIVATE \
645 (1 << PG_private | 1 << PG_private_2) 649 (1 << PG_private | 1 << PG_private_2)
diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h
index b48c3471c254..cacaabea8a09 100644
--- a/include/linux/page_owner.h
+++ b/include/linux/page_owner.h
@@ -8,6 +8,7 @@ extern struct page_ext_operations page_owner_ops;
8extern void __reset_page_owner(struct page *page, unsigned int order); 8extern void __reset_page_owner(struct page *page, unsigned int order);
9extern void __set_page_owner(struct page *page, 9extern void __set_page_owner(struct page *page,
10 unsigned int order, gfp_t gfp_mask); 10 unsigned int order, gfp_t gfp_mask);
11extern gfp_t __get_page_owner_gfp(struct page *page);
11 12
12static inline void reset_page_owner(struct page *page, unsigned int order) 13static inline void reset_page_owner(struct page *page, unsigned int order)
13{ 14{
@@ -25,6 +26,14 @@ static inline void set_page_owner(struct page *page,
25 26
26 __set_page_owner(page, order, gfp_mask); 27 __set_page_owner(page, order, gfp_mask);
27} 28}
29
30static inline gfp_t get_page_owner_gfp(struct page *page)
31{
32 if (likely(!page_owner_inited))
33 return 0;
34
35 return __get_page_owner_gfp(page);
36}
28#else 37#else
29static inline void reset_page_owner(struct page *page, unsigned int order) 38static inline void reset_page_owner(struct page *page, unsigned int order)
30{ 39{
@@ -33,6 +42,10 @@ static inline void set_page_owner(struct page *page,
33 unsigned int order, gfp_t gfp_mask) 42 unsigned int order, gfp_t gfp_mask)
34{ 43{
35} 44}
45static inline gfp_t get_page_owner_gfp(struct page *page)
46{
47 return 0;
48}
36 49
37#endif /* CONFIG_PAGE_OWNER */ 50#endif /* CONFIG_PAGE_OWNER */
38#endif /* __LINUX_PAGE_OWNER_H */ 51#endif /* __LINUX_PAGE_OWNER_H */
diff --git a/include/linux/pata_arasan_cf_data.h b/include/linux/pata_arasan_cf_data.h
index 3cc21c9cc1e8..9fade5dd2e86 100644
--- a/include/linux/pata_arasan_cf_data.h
+++ b/include/linux/pata_arasan_cf_data.h
@@ -4,7 +4,7 @@
4 * Arasan Compact Flash host controller platform data header file 4 * Arasan Compact Flash host controller platform data header file
5 * 5 *
6 * Copyright (C) 2011 ST Microelectronics 6 * Copyright (C) 2011 ST Microelectronics
7 * Viresh Kumar <viresh.linux@gmail.com> 7 * Viresh Kumar <vireshk@kernel.org>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/include/linux/platform_data/macb.h b/include/linux/platform_data/macb.h
index 044a124bfbbc..21b15f6fee25 100644
--- a/include/linux/platform_data/macb.h
+++ b/include/linux/platform_data/macb.h
@@ -8,11 +8,19 @@
8#ifndef __MACB_PDATA_H__ 8#ifndef __MACB_PDATA_H__
9#define __MACB_PDATA_H__ 9#define __MACB_PDATA_H__
10 10
11/**
12 * struct macb_platform_data - platform data for MACB Ethernet
13 * @phy_mask: phy mask passed when registering the MDIO bus
14 * within the driver
15 * @phy_irq_pin: PHY IRQ
16 * @is_rmii: using RMII interface?
17 * @rev_eth_addr: reverse Ethernet address byte order
18 */
11struct macb_platform_data { 19struct macb_platform_data {
12 u32 phy_mask; 20 u32 phy_mask;
13 int phy_irq_pin; /* PHY IRQ */ 21 int phy_irq_pin;
14 u8 is_rmii; /* using RMII interface? */ 22 u8 is_rmii;
15 u8 rev_eth_addr; /* reverse Ethernet address byte order */ 23 u8 rev_eth_addr;
16}; 24};
17 25
18#endif /* __MACB_PDATA_H__ */ 26#endif /* __MACB_PDATA_H__ */
diff --git a/include/linux/platform_data/mmc-esdhc-imx.h b/include/linux/platform_data/mmc-esdhc-imx.h
index 75f70f6ac137..e1571efa3f2b 100644
--- a/include/linux/platform_data/mmc-esdhc-imx.h
+++ b/include/linux/platform_data/mmc-esdhc-imx.h
@@ -43,7 +43,6 @@ struct esdhc_platform_data {
43 enum wp_types wp_type; 43 enum wp_types wp_type;
44 enum cd_types cd_type; 44 enum cd_types cd_type;
45 int max_bus_width; 45 int max_bus_width;
46 unsigned int f_max;
47 bool support_vsel; 46 bool support_vsel;
48 unsigned int delay_line; 47 unsigned int delay_line;
49}; 48};
diff --git a/include/linux/printk.h b/include/linux/printk.h
index 58b1fec40d37..a6298b27ac99 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -122,7 +122,7 @@ static inline __printf(1, 2) __cold
122void early_printk(const char *s, ...) { } 122void early_printk(const char *s, ...) { }
123#endif 123#endif
124 124
125typedef int(*printk_func_t)(const char *fmt, va_list args); 125typedef __printf(1, 0) int (*printk_func_t)(const char *fmt, va_list args);
126 126
127#ifdef CONFIG_PRINTK 127#ifdef CONFIG_PRINTK
128asmlinkage __printf(5, 0) 128asmlinkage __printf(5, 0)
@@ -166,7 +166,7 @@ char *log_buf_addr_get(void);
166u32 log_buf_len_get(void); 166u32 log_buf_len_get(void);
167void log_buf_kexec_setup(void); 167void log_buf_kexec_setup(void);
168void __init setup_log_buf(int early); 168void __init setup_log_buf(int early);
169void dump_stack_set_arch_desc(const char *fmt, ...); 169__printf(1, 2) void dump_stack_set_arch_desc(const char *fmt, ...);
170void dump_stack_print_info(const char *log_lvl); 170void dump_stack_print_info(const char *log_lvl);
171void show_regs_print_info(const char *log_lvl); 171void show_regs_print_info(const char *log_lvl);
172#else 172#else
@@ -217,7 +217,7 @@ static inline void setup_log_buf(int early)
217{ 217{
218} 218}
219 219
220static inline void dump_stack_set_arch_desc(const char *fmt, ...) 220static inline __printf(1, 2) void dump_stack_set_arch_desc(const char *fmt, ...)
221{ 221{
222} 222}
223 223
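The __printf(1, 0) annotations added here (to printk_func_t, and to mmio_trace_printk earlier in this diff) mark argument 1 as a printf-style format whose arguments arrive via va_list, so the compiler can still type-check format strings in callers. A small user-space sketch of the same GCC format attribute, written out without the kernel's __printf() wrapper (function names are made up):

#include <stdarg.h>
#include <stdio.h>

/* Equivalent of __printf(1, 0): arg 1 is the format; 0 means there are
 * no directly checkable variadic arguments (they come in as a va_list). */
__attribute__((format(printf, 1, 0)))
static int my_vlog(const char *fmt, va_list args)
{
	return vfprintf(stderr, fmt, args);
}

/* Equivalent of __printf(1, 2): checking starts at the first vararg. */
__attribute__((format(printf, 1, 2)))
static int my_log(const char *fmt, ...)
{
	va_list args;
	int ret;

	va_start(args, fmt);
	ret = my_vlog(fmt, args);
	va_end(args);
	return ret;
}

int main(void)
{
	my_log("%s = %d\n", "answer", 42);
	/* my_log("%s\n", 42); would now be caught by -Wformat. */
	return 0;
}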
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index f8a689ed62a5..9e0e76992be0 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -550,8 +550,24 @@ static inline int regulator_count_voltages(struct regulator *regulator)
550{ 550{
551 return 0; 551 return 0;
552} 552}
553
554static inline int regulator_list_voltage(struct regulator *regulator, unsigned selector)
555{
556 return -EINVAL;
557}
558
553#endif 559#endif
554 560
561static inline int regulator_set_voltage_triplet(struct regulator *regulator,
562 int min_uV, int target_uV,
563 int max_uV)
564{
565 if (regulator_set_voltage(regulator, target_uV, max_uV) == 0)
566 return 0;
567
568 return regulator_set_voltage(regulator, min_uV, max_uV);
569}
570
555static inline int regulator_set_voltage_tol(struct regulator *regulator, 571static inline int regulator_set_voltage_tol(struct regulator *regulator,
556 int new_uV, int tol_uV) 572 int new_uV, int tol_uV)
557{ 573{
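regulator_set_voltage_triplet(), added above, first tries the preferred range [target_uV, max_uV] and only widens to [min_uV, max_uV] if that fails. A consumer would typically call it during an OPP/DVFS transition; the sketch below assumes a hypothetical driver that already owns a struct regulator handle and its three voltage bounds (example_set_opp_voltage is not a real kernel function):

#include <linux/device.h>
#include <linux/regulator/consumer.h>

static int example_set_opp_voltage(struct device *dev,
				   struct regulator *supply,
				   int min_uV, int target_uV, int max_uV)
{
	int ret;

	/* Prefer target_uV..max_uV, fall back to min_uV..max_uV. */
	ret = regulator_set_voltage_triplet(supply, min_uV, target_uV, max_uV);
	if (ret)
		dev_err(dev, "failed to set supply voltage: %d\n", ret);

	return ret;
}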
diff --git a/include/linux/regulator/da9211.h b/include/linux/regulator/da9211.h
index 5dd65acc2a69..a43a5ca1167b 100644
--- a/include/linux/regulator/da9211.h
+++ b/include/linux/regulator/da9211.h
@@ -1,16 +1,16 @@
1/* 1/*
2 * da9211.h - Regulator device driver for DA9211/DA9213 2 * da9211.h - Regulator device driver for DA9211/DA9213/DA9215
3 * Copyright (C) 2014 Dialog Semiconductor Ltd. 3 * Copyright (C) 2015 Dialog Semiconductor Ltd.
4 * 4 *
5 * This library is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Library General Public 6 * modify it under the terms of the GNU General Public License
7 * License as published by the Free Software Foundation; either 7 * as published by the Free Software Foundation; either version 2
8 * version 2 of the License, or (at your option) any later version. 8 * of the License, or (at your option) any later version.
9 * 9 *
10 * This library is distributed in the hope that it will be useful, 10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * Library General Public License for more details. 13 * GNU General Public License for more details.
14 */ 14 */
15 15
16#ifndef __LINUX_REGULATOR_DA9211_H 16#ifndef __LINUX_REGULATOR_DA9211_H
@@ -23,6 +23,7 @@
23enum da9211_chip_id { 23enum da9211_chip_id {
24 DA9211, 24 DA9211,
25 DA9213, 25 DA9213,
26 DA9215,
26}; 27};
27 28
28struct da9211_pdata { 29struct da9211_pdata {
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 4db9fbe4889d..45932228cbf5 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -148,6 +148,7 @@ struct regulator_ops {
148 int (*get_current_limit) (struct regulator_dev *); 148 int (*get_current_limit) (struct regulator_dev *);
149 149
150 int (*set_input_current_limit) (struct regulator_dev *, int lim_uA); 150 int (*set_input_current_limit) (struct regulator_dev *, int lim_uA);
151 int (*set_over_current_protection) (struct regulator_dev *);
151 152
152 /* enable/disable regulator */ 153 /* enable/disable regulator */
153 int (*enable) (struct regulator_dev *); 154 int (*enable) (struct regulator_dev *);
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index b11be1260129..a1067d0b3991 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -147,6 +147,7 @@ struct regulation_constraints {
147 unsigned ramp_disable:1; /* disable ramp delay */ 147 unsigned ramp_disable:1; /* disable ramp delay */
148 unsigned soft_start:1; /* ramp voltage slowly */ 148 unsigned soft_start:1; /* ramp voltage slowly */
149 unsigned pull_down:1; /* pull down resistor when regulator off */ 149 unsigned pull_down:1; /* pull down resistor when regulator off */
150 unsigned over_current_protection:1; /* auto disable on over current */
150}; 151};
151 152
152/** 153/**
diff --git a/include/linux/regulator/mt6311.h b/include/linux/regulator/mt6311.h
new file mode 100644
index 000000000000..8473259395b6
--- /dev/null
+++ b/include/linux/regulator/mt6311.h
@@ -0,0 +1,29 @@
1/*
2 * Copyright (c) 2015 MediaTek Inc.
3 * Author: Henry Chen <henryc.chen@mediatek.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#ifndef __LINUX_REGULATOR_MT6311_H
16#define __LINUX_REGULATOR_MT6311_H
17
18#define MT6311_MAX_REGULATORS 2
19
20enum {
21 MT6311_ID_VDVFS = 0,
22 MT6311_ID_VBIASN,
23};
24
25#define MT6311_E1_CID_CODE 0x10
26#define MT6311_E2_CID_CODE 0x20
27#define MT6311_E3_CID_CODE 0x30
28
29#endif /* __LINUX_REGULATOR_MT6311_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index ae21f1591615..04b5ada460b4 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1522,8 +1522,6 @@ struct task_struct {
1522/* hung task detection */ 1522/* hung task detection */
1523 unsigned long last_switch_count; 1523 unsigned long last_switch_count;
1524#endif 1524#endif
1525/* CPU-specific state of this task */
1526 struct thread_struct thread;
1527/* filesystem information */ 1525/* filesystem information */
1528 struct fs_struct *fs; 1526 struct fs_struct *fs;
1529/* open file information */ 1527/* open file information */
@@ -1778,8 +1776,22 @@ struct task_struct {
1778 unsigned long task_state_change; 1776 unsigned long task_state_change;
1779#endif 1777#endif
1780 int pagefault_disabled; 1778 int pagefault_disabled;
1779/* CPU-specific state of this task */
1780 struct thread_struct thread;
1781/*
1782 * WARNING: on x86, 'thread_struct' contains a variable-sized
1783 * structure. It *MUST* be at the end of 'task_struct'.
1784 *
1785 * Do not put anything below here!
1786 */
1781}; 1787};
1782 1788
1789#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
1790extern int arch_task_struct_size __read_mostly;
1791#else
1792# define arch_task_struct_size (sizeof(struct task_struct))
1793#endif
1794
1783/* Future-safe accessor for struct task_struct's cpus_allowed. */ 1795/* Future-safe accessor for struct task_struct's cpus_allowed. */
1784#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed) 1796#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
1785 1797
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index d6cdd6e87d53..9b88536487e6 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1602,20 +1602,16 @@ static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
1602 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1602 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1603 1603
1604 /* 1604 /*
1605 * Propagate page->pfmemalloc to the skb if we can. The problem is 1605 * Propagate page pfmemalloc to the skb if we can. The problem is
1606 * that not all callers have unique ownership of the page. If 1606 * that not all callers have unique ownership of the page but rely
1607 * pfmemalloc is set, we check the mapping as a mapping implies 1607 * on page_is_pfmemalloc doing the right thing(tm).
1608 * page->index is set (index and pfmemalloc share space).
1609 * If it's a valid mapping, we cannot use page->pfmemalloc but we
1610 * do not lose pfmemalloc information as the pages would not be
1611 * allocated using __GFP_MEMALLOC.
1612 */ 1608 */
1613 frag->page.p = page; 1609 frag->page.p = page;
1614 frag->page_offset = off; 1610 frag->page_offset = off;
1615 skb_frag_size_set(frag, size); 1611 skb_frag_size_set(frag, size);
1616 1612
1617 page = compound_head(page); 1613 page = compound_head(page);
1618 if (page->pfmemalloc && !page->mapping) 1614 if (page_is_pfmemalloc(page))
1619 skb->pfmemalloc = true; 1615 skb->pfmemalloc = true;
1620} 1616}
1621 1617
@@ -2263,7 +2259,7 @@ static inline struct page *dev_alloc_page(void)
2263static inline void skb_propagate_pfmemalloc(struct page *page, 2259static inline void skb_propagate_pfmemalloc(struct page *page,
2264 struct sk_buff *skb) 2260 struct sk_buff *skb)
2265{ 2261{
2266 if (page && page->pfmemalloc) 2262 if (page_is_pfmemalloc(page))
2267 skb->pfmemalloc = true; 2263 skb->pfmemalloc = true;
2268} 2264}
2269 2265
@@ -2884,11 +2880,11 @@ static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
2884 * 2880 *
2885 * PHY drivers may accept clones of transmitted packets for 2881 * PHY drivers may accept clones of transmitted packets for
2886 * timestamping via their phy_driver.txtstamp method. These drivers 2882 * timestamping via their phy_driver.txtstamp method. These drivers
2887 * must call this function to return the skb back to the stack, with 2883 * must call this function to return the skb back to the stack with a
2888 * or without a timestamp. 2884 * timestamp.
2889 * 2885 *
2890 * @skb: clone of the original outgoing packet 2886 * @skb: clone of the original outgoing packet
2891 * @hwtstamps: hardware time stamps, may be NULL if not available 2887 * @hwtstamps: hardware time stamps
2892 * 2888 *
2893 */ 2889 */
2894void skb_complete_tx_timestamp(struct sk_buff *skb, 2890void skb_complete_tx_timestamp(struct sk_buff *skb,
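With page->pfmemalloc gone, both skbuff helpers above go through page_is_pfmemalloc(). A driver that builds an skb around a receive page would propagate the flag roughly as below; the function and its caller are hypothetical, only build_skb(), skb_propagate_pfmemalloc() and page_is_pfmemalloc() come from the headers changed in this diff:

#include <linux/skbuff.h>

/* Build an skb around a receive buffer carved out of 'page' and carry
 * the page's emergency-reserve (pfmemalloc) status over to the skb. */
static struct sk_buff *example_rx_build_skb(struct page *page, void *data,
					    unsigned int frag_size)
{
	struct sk_buff *skb = build_skb(data, frag_size);

	if (!skb)
		return NULL;

	/* skb->pfmemalloc is set iff the page came from the reserves. */
	skb_propagate_pfmemalloc(page, skb);
	return skb;
}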
diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h
index 7c9b484735c5..1f6526c76ee8 100644
--- a/include/linux/usb/cdc_ncm.h
+++ b/include/linux/usb/cdc_ncm.h
@@ -80,6 +80,9 @@
80#define CDC_NCM_TIMER_INTERVAL_MIN 5UL 80#define CDC_NCM_TIMER_INTERVAL_MIN 5UL
81#define CDC_NCM_TIMER_INTERVAL_MAX (U32_MAX / NSEC_PER_USEC) 81#define CDC_NCM_TIMER_INTERVAL_MAX (U32_MAX / NSEC_PER_USEC)
82 82
83/* Driver flags */
84#define CDC_NCM_FLAG_NDP_TO_END 0x02 /* NDP is placed at end of frame */
85
83#define cdc_ncm_comm_intf_is_mbim(x) ((x)->desc.bInterfaceSubClass == USB_CDC_SUBCLASS_MBIM && \ 86#define cdc_ncm_comm_intf_is_mbim(x) ((x)->desc.bInterfaceSubClass == USB_CDC_SUBCLASS_MBIM && \
84 (x)->desc.bInterfaceProtocol == USB_CDC_PROTO_NONE) 87 (x)->desc.bInterfaceProtocol == USB_CDC_PROTO_NONE)
85#define cdc_ncm_data_intf_is_mbim(x) ((x)->desc.bInterfaceProtocol == USB_CDC_MBIM_PROTO_NTB) 88#define cdc_ncm_data_intf_is_mbim(x) ((x)->desc.bInterfaceProtocol == USB_CDC_MBIM_PROTO_NTB)
@@ -103,9 +106,11 @@ struct cdc_ncm_ctx {
103 106
104 spinlock_t mtx; 107 spinlock_t mtx;
105 atomic_t stop; 108 atomic_t stop;
109 int drvflags;
106 110
107 u32 timer_interval; 111 u32 timer_interval;
108 u32 max_ndp_size; 112 u32 max_ndp_size;
113 struct usb_cdc_ncm_ndp16 *delayed_ndp16;
109 114
110 u32 tx_timer_pending; 115 u32 tx_timer_pending;
111 u32 tx_curr_frame_num; 116 u32 tx_curr_frame_num;
@@ -133,7 +138,7 @@ struct cdc_ncm_ctx {
133}; 138};
134 139
135u8 cdc_ncm_select_altsetting(struct usb_interface *intf); 140u8 cdc_ncm_select_altsetting(struct usb_interface *intf);
136int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting); 141int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting, int drvflags);
137void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf); 142void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf);
138struct sk_buff *cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign); 143struct sk_buff *cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign);
139int cdc_ncm_rx_verify_nth16(struct cdc_ncm_ctx *ctx, struct sk_buff *skb_in); 144int cdc_ncm_rx_verify_nth16(struct cdc_ncm_ctx *ctx, struct sk_buff *skb_in);
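Since cdc_ncm_bind_common() now takes a drvflags argument, a sub-driver that needs the NDP placed at the end of the NTB passes CDC_NCM_FLAG_NDP_TO_END at bind time. A hedged sketch (the quirk bit and the bind wrapper are invented for illustration; the altsetting value is device-specific):

#include <linux/usb/cdc_ncm.h>

#define EXAMPLE_NEEDS_NDP_TO_END	0x1	/* hypothetical driver quirk bit */

static int example_ncm_bind(struct usbnet *dev, struct usb_interface *intf,
			    unsigned long quirks)
{
	int drvflags = 0;

	if (quirks & EXAMPLE_NEEDS_NDP_TO_END)
		drvflags |= CDC_NCM_FLAG_NDP_TO_END;

	/* '1' stands in for the device's data altsetting, not a fixed value. */
	return cdc_ncm_bind_common(dev, intf, 1, drvflags);
}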
diff --git a/include/media/rc-core.h b/include/media/rc-core.h
index 45534da57759..644bdc61c387 100644
--- a/include/media/rc-core.h
+++ b/include/media/rc-core.h
@@ -74,8 +74,6 @@ enum rc_filter_type {
74 * @input_dev: the input child device used to communicate events to userspace 74 * @input_dev: the input child device used to communicate events to userspace
75 * @driver_type: specifies if protocol decoding is done in hardware or software 75 * @driver_type: specifies if protocol decoding is done in hardware or software
76 * @idle: used to keep track of RX state 76 * @idle: used to keep track of RX state
77 * @encode_wakeup: wakeup filtering uses IR encode API, therefore the allowed
78 * wakeup protocols is the set of all raw encoders
79 * @allowed_protocols: bitmask with the supported RC_BIT_* protocols 77 * @allowed_protocols: bitmask with the supported RC_BIT_* protocols
80 * @enabled_protocols: bitmask with the enabled RC_BIT_* protocols 78 * @enabled_protocols: bitmask with the enabled RC_BIT_* protocols
81 * @allowed_wakeup_protocols: bitmask with the supported RC_BIT_* wakeup protocols 79 * @allowed_wakeup_protocols: bitmask with the supported RC_BIT_* wakeup protocols
@@ -136,7 +134,6 @@ struct rc_dev {
136 struct input_dev *input_dev; 134 struct input_dev *input_dev;
137 enum rc_driver_type driver_type; 135 enum rc_driver_type driver_type;
138 bool idle; 136 bool idle;
139 bool encode_wakeup;
140 u64 allowed_protocols; 137 u64 allowed_protocols;
141 u64 enabled_protocols; 138 u64 enabled_protocols;
142 u64 allowed_wakeup_protocols; 139 u64 allowed_wakeup_protocols;
@@ -246,7 +243,6 @@ static inline void init_ir_raw_event(struct ir_raw_event *ev)
246#define US_TO_NS(usec) ((usec) * 1000) 243#define US_TO_NS(usec) ((usec) * 1000)
247#define MS_TO_US(msec) ((msec) * 1000) 244#define MS_TO_US(msec) ((msec) * 1000)
248#define MS_TO_NS(msec) ((msec) * 1000 * 1000) 245#define MS_TO_NS(msec) ((msec) * 1000 * 1000)
249#define NS_TO_US(nsec) DIV_ROUND_UP(nsec, 1000L)
250 246
251void ir_raw_event_handle(struct rc_dev *dev); 247void ir_raw_event_handle(struct rc_dev *dev);
252int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev); 248int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev);
@@ -254,9 +250,6 @@ int ir_raw_event_store_edge(struct rc_dev *dev, enum raw_event_type type);
254int ir_raw_event_store_with_filter(struct rc_dev *dev, 250int ir_raw_event_store_with_filter(struct rc_dev *dev,
255 struct ir_raw_event *ev); 251 struct ir_raw_event *ev);
256void ir_raw_event_set_idle(struct rc_dev *dev, bool idle); 252void ir_raw_event_set_idle(struct rc_dev *dev, bool idle);
257int ir_raw_encode_scancode(u64 protocols,
258 const struct rc_scancode_filter *scancode,
259 struct ir_raw_event *events, unsigned int max);
260 253
261static inline void ir_raw_event_reset(struct rc_dev *dev) 254static inline void ir_raw_event_reset(struct rc_dev *dev)
262{ 255{
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index 22a44c2f5963..c192e1b46cdc 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -139,6 +139,7 @@ enum vb2_io_modes {
139 * @VB2_BUF_STATE_PREPARING: buffer is being prepared in videobuf 139 * @VB2_BUF_STATE_PREPARING: buffer is being prepared in videobuf
140 * @VB2_BUF_STATE_PREPARED: buffer prepared in videobuf and by the driver 140 * @VB2_BUF_STATE_PREPARED: buffer prepared in videobuf and by the driver
141 * @VB2_BUF_STATE_QUEUED: buffer queued in videobuf, but not in driver 141 * @VB2_BUF_STATE_QUEUED: buffer queued in videobuf, but not in driver
142 * @VB2_BUF_STATE_REQUEUEING: re-queue a buffer to the driver
142 * @VB2_BUF_STATE_ACTIVE: buffer queued in driver and possibly used 143 * @VB2_BUF_STATE_ACTIVE: buffer queued in driver and possibly used
143 * in a hardware operation 144 * in a hardware operation
144 * @VB2_BUF_STATE_DONE: buffer returned from driver to videobuf, but 145 * @VB2_BUF_STATE_DONE: buffer returned from driver to videobuf, but
@@ -152,6 +153,7 @@ enum vb2_buffer_state {
152 VB2_BUF_STATE_PREPARING, 153 VB2_BUF_STATE_PREPARING,
153 VB2_BUF_STATE_PREPARED, 154 VB2_BUF_STATE_PREPARED,
154 VB2_BUF_STATE_QUEUED, 155 VB2_BUF_STATE_QUEUED,
156 VB2_BUF_STATE_REQUEUEING,
155 VB2_BUF_STATE_ACTIVE, 157 VB2_BUF_STATE_ACTIVE,
156 VB2_BUF_STATE_DONE, 158 VB2_BUF_STATE_DONE,
157 VB2_BUF_STATE_ERROR, 159 VB2_BUF_STATE_ERROR,
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 3ee4c92afd1b..931738bc5bba 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -99,7 +99,6 @@ struct tc_action_ops {
99 99
100int tcf_hash_search(struct tc_action *a, u32 index); 100int tcf_hash_search(struct tc_action *a, u32 index);
101void tcf_hash_destroy(struct tc_action *a); 101void tcf_hash_destroy(struct tc_action *a);
102int tcf_hash_release(struct tc_action *a, int bind);
103u32 tcf_hash_new_index(struct tcf_hashinfo *hinfo); 102u32 tcf_hash_new_index(struct tcf_hashinfo *hinfo);
104int tcf_hash_check(u32 index, struct tc_action *a, int bind); 103int tcf_hash_check(u32 index, struct tc_action *a, int bind);
105int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a, 104int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a,
@@ -107,6 +106,13 @@ int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a,
107void tcf_hash_cleanup(struct tc_action *a, struct nlattr *est); 106void tcf_hash_cleanup(struct tc_action *a, struct nlattr *est);
108void tcf_hash_insert(struct tc_action *a); 107void tcf_hash_insert(struct tc_action *a);
109 108
109int __tcf_hash_release(struct tc_action *a, bool bind, bool strict);
110
111static inline int tcf_hash_release(struct tc_action *a, bool bind)
112{
113 return __tcf_hash_release(a, bind, false);
114}
115
110int tcf_register_action(struct tc_action_ops *a, unsigned int mask); 116int tcf_register_action(struct tc_action_ops *a, unsigned int mask);
111int tcf_unregister_action(struct tc_action_ops *a); 117int tcf_unregister_action(struct tc_action_ops *a);
112int tcf_action_destroy(struct list_head *actions, int bind); 118int tcf_action_destroy(struct list_head *actions, int bind);
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index a741678f24a2..883fe1e7c5a1 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -4868,6 +4868,23 @@ bool cfg80211_reg_can_beacon(struct wiphy *wiphy,
4868 struct cfg80211_chan_def *chandef, 4868 struct cfg80211_chan_def *chandef,
4869 enum nl80211_iftype iftype); 4869 enum nl80211_iftype iftype);
4870 4870
4871/**
4872 * cfg80211_reg_can_beacon_relax - check if beaconing is allowed with relaxation
4873 * @wiphy: the wiphy
4874 * @chandef: the channel definition
4875 * @iftype: interface type
4876 *
4877 * Return: %true if there is no secondary channel or the secondary channel(s)
4878 * can be used for beaconing (i.e. is not a radar channel etc.). This version
4879 * also checks if IR-relaxation conditions apply, to allow beaconing under
4880 * more permissive conditions.
4881 *
4882 * Requires the RTNL to be held.
4883 */
4884bool cfg80211_reg_can_beacon_relax(struct wiphy *wiphy,
4885 struct cfg80211_chan_def *chandef,
4886 enum nl80211_iftype iftype);
4887
4871/* 4888/*
4872 * cfg80211_ch_switch_notify - update wdev channel and notify userspace 4889 * cfg80211_ch_switch_notify - update wdev channel and notify userspace
4873 * @dev: the device which switched channels 4890 * @dev: the device which switched channels
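cfg80211_reg_can_beacon_relax() is the variant for callers that may beacon under IR-relaxation rules, and per the kernel-doc above it requires the RTNL. A minimal, hedged pre-start check in a hypothetical caller could look like this:

#include <linux/rtnetlink.h>
#include <net/cfg80211.h>

/* Sketch only: decide whether an interface of the given type may start
 * beaconing on 'chandef', taking IR-relaxation into account. */
static bool example_can_start_ap(struct wiphy *wiphy,
				 struct cfg80211_chan_def *chandef,
				 enum nl80211_iftype iftype)
{
	ASSERT_RTNL();		/* required by cfg80211_reg_can_beacon_relax() */

	return cfg80211_reg_can_beacon_relax(wiphy, chandef, iftype);
}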
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index e1300b3dd597..53eead2da743 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -21,13 +21,11 @@ struct netns_frags {
21 * @INET_FRAG_FIRST_IN: first fragment has arrived 21 * @INET_FRAG_FIRST_IN: first fragment has arrived
22 * @INET_FRAG_LAST_IN: final fragment has arrived 22 * @INET_FRAG_LAST_IN: final fragment has arrived
23 * @INET_FRAG_COMPLETE: frag queue has been processed and is due for destruction 23 * @INET_FRAG_COMPLETE: frag queue has been processed and is due for destruction
24 * @INET_FRAG_EVICTED: frag queue is being evicted
25 */ 24 */
26enum { 25enum {
27 INET_FRAG_FIRST_IN = BIT(0), 26 INET_FRAG_FIRST_IN = BIT(0),
28 INET_FRAG_LAST_IN = BIT(1), 27 INET_FRAG_LAST_IN = BIT(1),
29 INET_FRAG_COMPLETE = BIT(2), 28 INET_FRAG_COMPLETE = BIT(2),
30 INET_FRAG_EVICTED = BIT(3)
31}; 29};
32 30
33/** 31/**
@@ -45,6 +43,7 @@ enum {
45 * @flags: fragment queue flags 43 * @flags: fragment queue flags
46 * @max_size: maximum received fragment size 44 * @max_size: maximum received fragment size
47 * @net: namespace that this frag belongs to 45 * @net: namespace that this frag belongs to
46 * @list_evictor: list of queues to forcefully evict (e.g. due to low memory)
48 */ 47 */
49struct inet_frag_queue { 48struct inet_frag_queue {
50 spinlock_t lock; 49 spinlock_t lock;
@@ -59,6 +58,7 @@ struct inet_frag_queue {
59 __u8 flags; 58 __u8 flags;
60 u16 max_size; 59 u16 max_size;
61 struct netns_frags *net; 60 struct netns_frags *net;
61 struct hlist_node list_evictor;
62}; 62};
63 63
64#define INETFRAGS_HASHSZ 1024 64#define INETFRAGS_HASHSZ 1024
@@ -125,6 +125,11 @@ static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f
125 inet_frag_destroy(q, f); 125 inet_frag_destroy(q, f);
126} 126}
127 127
128static inline bool inet_frag_evicting(struct inet_frag_queue *q)
129{
130 return !hlist_unhashed(&q->list_evictor);
131}
132
128/* Memory Tracking Functions. */ 133/* Memory Tracking Functions. */
129 134
130/* The default percpu_counter batch size is not big enough to scale to 135/* The default percpu_counter batch size is not big enough to scale to
@@ -139,14 +144,14 @@ static inline int frag_mem_limit(struct netns_frags *nf)
139 return percpu_counter_read(&nf->mem); 144 return percpu_counter_read(&nf->mem);
140} 145}
141 146
142static inline void sub_frag_mem_limit(struct inet_frag_queue *q, int i) 147static inline void sub_frag_mem_limit(struct netns_frags *nf, int i)
143{ 148{
144 __percpu_counter_add(&q->net->mem, -i, frag_percpu_counter_batch); 149 __percpu_counter_add(&nf->mem, -i, frag_percpu_counter_batch);
145} 150}
146 151
147static inline void add_frag_mem_limit(struct inet_frag_queue *q, int i) 152static inline void add_frag_mem_limit(struct netns_frags *nf, int i)
148{ 153{
149 __percpu_counter_add(&q->net->mem, i, frag_percpu_counter_batch); 154 __percpu_counter_add(&nf->mem, i, frag_percpu_counter_batch);
150} 155}
151 156
152static inline void init_frag_mem_limit(struct netns_frags *nf) 157static inline void init_frag_mem_limit(struct netns_frags *nf)
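The accounting helpers now take the struct netns_frags directly instead of reaching through a queue, so call sites that used to pass a queue pass q->net (or whatever netns_frags pointer they already hold). A sketch of the adjusted pattern, with hypothetical charge/uncharge wrappers:

#include <linux/skbuff.h>
#include <net/inet_frag.h>

/* Charge a newly queued fragment against the per-namespace limit. */
static void example_frag_charge(struct inet_frag_queue *q, struct sk_buff *skb)
{
	add_frag_mem_limit(q->net, skb->truesize);
}

/* Release the accounting when the fragment (or the whole queue) is freed. */
static void example_frag_uncharge(struct netns_frags *nf, unsigned int size)
{
	sub_frag_mem_limit(nf, size);
}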
diff --git a/include/net/ip.h b/include/net/ip.h
index 0750a186ea63..d5fe9f2ab699 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -161,6 +161,7 @@ static inline __u8 get_rtconn_flags(struct ipcm_cookie* ipc, struct sock* sk)
161} 161}
162 162
163/* datagram.c */ 163/* datagram.c */
164int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
164int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len); 165int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
165 166
166void ip4_datagram_release_cb(struct sock *sk); 167void ip4_datagram_release_cb(struct sock *sk);
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 49c142bdf01e..5fa643b4e891 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -183,7 +183,6 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
183struct fib_table { 183struct fib_table {
184 struct hlist_node tb_hlist; 184 struct hlist_node tb_hlist;
185 u32 tb_id; 185 u32 tb_id;
186 int tb_default;
187 int tb_num_default; 186 int tb_num_default;
188 struct rcu_head rcu; 187 struct rcu_head rcu;
189 unsigned long *tb_data; 188 unsigned long *tb_data;
@@ -290,7 +289,7 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb);
290int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, 289int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
291 u8 tos, int oif, struct net_device *dev, 290 u8 tos, int oif, struct net_device *dev,
292 struct in_device *idev, u32 *itag); 291 struct in_device *idev, u32 *itag);
293void fib_select_default(struct fib_result *res); 292void fib_select_default(const struct flowi4 *flp, struct fib_result *res);
294#ifdef CONFIG_IP_ROUTE_CLASSID 293#ifdef CONFIG_IP_ROUTE_CLASSID
295static inline int fib_num_tclassid_users(struct net *net) 294static inline int fib_num_tclassid_users(struct net *net)
296{ 295{
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 095433b8a8b0..37cd3911d5c5 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -291,7 +291,7 @@ extern unsigned int nf_conntrack_max;
291extern unsigned int nf_conntrack_hash_rnd; 291extern unsigned int nf_conntrack_hash_rnd;
292void init_nf_conntrack_hash_rnd(void); 292void init_nf_conntrack_hash_rnd(void);
293 293
294void nf_conntrack_tmpl_insert(struct net *net, struct nf_conn *tmpl); 294struct nf_conn *nf_ct_tmpl_alloc(struct net *net, u16 zone, gfp_t flags);
295 295
296#define NF_CT_STAT_INC(net, count) __this_cpu_inc((net)->ct.stat->count) 296#define NF_CT_STAT_INC(net, count) __this_cpu_inc((net)->ct.stat->count)
297#define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count) 297#define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count)
diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
index 29d6a94db54d..723b61c82b3f 100644
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -68,7 +68,6 @@ struct ct_pcpu {
68 spinlock_t lock; 68 spinlock_t lock;
69 struct hlist_nulls_head unconfirmed; 69 struct hlist_nulls_head unconfirmed;
70 struct hlist_nulls_head dying; 70 struct hlist_nulls_head dying;
71 struct hlist_nulls_head tmpl;
72}; 71};
73 72
74struct netns_ct { 73struct netns_ct {
diff --git a/include/net/sock.h b/include/net/sock.h
index 05a8c1aea251..f21f0708ec59 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -902,7 +902,7 @@ void sk_stream_kill_queues(struct sock *sk);
902void sk_set_memalloc(struct sock *sk); 902void sk_set_memalloc(struct sock *sk);
903void sk_clear_memalloc(struct sock *sk); 903void sk_clear_memalloc(struct sock *sk);
904 904
905int sk_wait_data(struct sock *sk, long *timeo); 905int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb);
906 906
907struct request_sock_ops; 907struct request_sock_ops;
908struct timewait_sock_ops; 908struct timewait_sock_ops;
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 986fddb08579..b0f898e3b2e7 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -1745,6 +1745,7 @@ struct ib_device {
1745 char node_desc[64]; 1745 char node_desc[64];
1746 __be64 node_guid; 1746 __be64 node_guid;
1747 u32 local_dma_lkey; 1747 u32 local_dma_lkey;
1748 u16 is_switch:1;
1748 u8 node_type; 1749 u8 node_type;
1749 u8 phys_port_cnt; 1750 u8 phys_port_cnt;
1750 1751
@@ -1824,6 +1825,20 @@ enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
1824 u8 port_num); 1825 u8 port_num);
1825 1826
1826/** 1827/**
1828 * rdma_cap_ib_switch - Check if the device is IB switch
1829 * @device: Device to check
1830 *
1831 * Device driver is responsible for setting is_switch bit on
1832 * in ib_device structure at init time.
1833 *
1834 * Return: true if the device is IB switch.
1835 */
1836static inline bool rdma_cap_ib_switch(const struct ib_device *device)
1837{
1838 return device->is_switch;
1839}
1840
1841/**
1827 * rdma_start_port - Return the first valid port number for the device 1842 * rdma_start_port - Return the first valid port number for the device
1828 * specified 1843 * specified
1829 * 1844 *
@@ -1833,7 +1848,7 @@ enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
1833 */ 1848 */
1834static inline u8 rdma_start_port(const struct ib_device *device) 1849static inline u8 rdma_start_port(const struct ib_device *device)
1835{ 1850{
1836 return (device->node_type == RDMA_NODE_IB_SWITCH) ? 0 : 1; 1851 return rdma_cap_ib_switch(device) ? 0 : 1;
1837} 1852}
1838 1853
1839/** 1854/**
@@ -1846,8 +1861,7 @@ static inline u8 rdma_start_port(const struct ib_device *device)
1846 */ 1861 */
1847static inline u8 rdma_end_port(const struct ib_device *device) 1862static inline u8 rdma_end_port(const struct ib_device *device)
1848{ 1863{
1849 return (device->node_type == RDMA_NODE_IB_SWITCH) ? 1864 return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
1850 0 : device->phys_port_cnt;
1851} 1865}
1852 1866
1853static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num) 1867static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num)
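With rdma_cap_ib_switch() replacing open-coded RDMA_NODE_IB_SWITCH checks, port iteration needs no special casing: rdma_start_port()/rdma_end_port() already yield 0..0 for a switch (management port only) and 1..phys_port_cnt otherwise. A hedged sketch of a generic port walk (the walker itself is hypothetical):

#include <rdma/ib_verbs.h>

/* Visit every valid port of 'device', whether it is an IB switch
 * (single management port 0) or a regular device (ports 1..N). */
static void example_for_each_port(struct ib_device *device,
				  void (*fn)(struct ib_device *dev, u8 port))
{
	unsigned int port;

	for (port = rdma_start_port(device);
	     port <= rdma_end_port(device); port++)
		fn(device, port);
}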
diff --git a/include/scsi/scsi_eh.h b/include/scsi/scsi_eh.h
index 4942710ef720..8d1d7fa67ec4 100644
--- a/include/scsi/scsi_eh.h
+++ b/include/scsi/scsi_eh.h
@@ -28,7 +28,6 @@ extern int scsi_get_sense_info_fld(const u8 * sense_buffer, int sb_len,
28 u64 * info_out); 28 u64 * info_out);
29 29
30extern void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq); 30extern void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq);
31extern void scsi_set_sense_information(u8 *buf, u64 info);
32 31
33extern int scsi_ioctl_reset(struct scsi_device *, int __user *); 32extern int scsi_ioctl_reset(struct scsi_device *, int __user *);
34 33
diff --git a/include/scsi/scsi_transport_srp.h b/include/scsi/scsi_transport_srp.h
index cdb05dd1d440..d40d3ef25707 100644
--- a/include/scsi/scsi_transport_srp.h
+++ b/include/scsi/scsi_transport_srp.h
@@ -119,6 +119,7 @@ extern struct srp_rport *srp_rport_add(struct Scsi_Host *,
119extern void srp_rport_del(struct srp_rport *); 119extern void srp_rport_del(struct srp_rport *);
120extern int srp_tmo_valid(int reconnect_delay, int fast_io_fail_tmo, 120extern int srp_tmo_valid(int reconnect_delay, int fast_io_fail_tmo,
121 int dev_loss_tmo); 121 int dev_loss_tmo);
122int srp_parse_tmo(int *tmo, const char *buf);
122extern int srp_reconnect_rport(struct srp_rport *rport); 123extern int srp_reconnect_rport(struct srp_rport *rport);
123extern void srp_start_tl_fail_timers(struct srp_rport *rport); 124extern void srp_start_tl_fail_timers(struct srp_rport *rport);
124extern void srp_remove_host(struct Scsi_Host *); 125extern void srp_remove_host(struct Scsi_Host *);
diff --git a/include/sound/soc-topology.h b/include/sound/soc-topology.h
index 865a141b118b..427bc41df3ae 100644
--- a/include/sound/soc-topology.h
+++ b/include/sound/soc-topology.h
@@ -141,6 +141,8 @@ struct snd_soc_tplg_ops {
141 int io_ops_count; 141 int io_ops_count;
142}; 142};
143 143
144#ifdef CONFIG_SND_SOC_TOPOLOGY
145
144/* gets a pointer to data from the firmware block header */ 146/* gets a pointer to data from the firmware block header */
145static inline const void *snd_soc_tplg_get_data(struct snd_soc_tplg_hdr *hdr) 147static inline const void *snd_soc_tplg_get_data(struct snd_soc_tplg_hdr *hdr)
146{ 148{
@@ -165,4 +167,14 @@ int snd_soc_tplg_widget_bind_event(struct snd_soc_dapm_widget *w,
165 const struct snd_soc_tplg_widget_events *events, int num_events, 167 const struct snd_soc_tplg_widget_events *events, int num_events,
166 u16 event_type); 168 u16 event_type);
167 169
170#else
171
172static inline int snd_soc_tplg_component_remove(struct snd_soc_component *comp,
173 u32 index)
174{
175 return 0;
176}
177
178#endif
179
168#endif 180#endif
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
index 34117b8b72e4..0aedbb2c10e0 100644
--- a/include/target/iscsi/iscsi_target_core.h
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -595,6 +595,7 @@ struct iscsi_conn {
595 int bitmap_id; 595 int bitmap_id;
596 int rx_thread_active; 596 int rx_thread_active;
597 struct task_struct *rx_thread; 597 struct task_struct *rx_thread;
598 struct completion rx_login_comp;
598 int tx_thread_active; 599 int tx_thread_active;
599 struct task_struct *tx_thread; 600 struct task_struct *tx_thread;
600 /* list_head for session connection list */ 601 /* list_head for session connection list */
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index b6fce900a833..fbdd11851725 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -32,7 +32,7 @@
32#ifndef __AMDGPU_DRM_H__ 32#ifndef __AMDGPU_DRM_H__
33#define __AMDGPU_DRM_H__ 33#define __AMDGPU_DRM_H__
34 34
35#include <drm/drm.h> 35#include "drm.h"
36 36
37#define DRM_AMDGPU_GEM_CREATE 0x00 37#define DRM_AMDGPU_GEM_CREATE 0x00
38#define DRM_AMDGPU_GEM_MMAP 0x01 38#define DRM_AMDGPU_GEM_MMAP 0x01
@@ -614,6 +614,8 @@ struct drm_amdgpu_info_device {
614 uint32_t vram_type; 614 uint32_t vram_type;
615 /** video memory bit width*/ 615 /** video memory bit width*/
616 uint32_t vram_bit_width; 616 uint32_t vram_bit_width;
617 /* vce harvesting instance */
618 uint32_t vce_harvest_config;
617}; 619};
618 620
619struct drm_amdgpu_info_hw_ip { 621struct drm_amdgpu_info_hw_ip {
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 6e1a2ed116cb..db809b722985 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -1070,6 +1070,14 @@ struct drm_i915_reg_read {
1070 __u64 offset; 1070 __u64 offset;
1071 __u64 val; /* Return value */ 1071 __u64 val; /* Return value */
1072}; 1072};
1073/* Known registers:
1074 *
1075 * Render engine timestamp - 0x2358 + 64bit - gen7+
1076 * - Note this register returns an invalid value if using the default
1077 * single instruction 8-byte read; to work around that, use
1078 * offset (0x2538 | 1) instead.
1079 *
1080 */
1073 1081
1074struct drm_i915_reset_stats { 1082struct drm_i915_reset_stats {
1075 __u32 ctx_id; 1083 __u32 ctx_id;
diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h
index 1ef76661e1a1..01aa2a8e3f8d 100644
--- a/include/uapi/drm/radeon_drm.h
+++ b/include/uapi/drm/radeon_drm.h
@@ -33,7 +33,7 @@
33#ifndef __RADEON_DRM_H__ 33#ifndef __RADEON_DRM_H__
34#define __RADEON_DRM_H__ 34#define __RADEON_DRM_H__
35 35
36#include <drm/drm.h> 36#include "drm.h"
37 37
38/* WARNING: If you change any of these defines, make sure to change the 38/* WARNING: If you change any of these defines, make sure to change the
39 * defines in the X server file (radeon_sarea.h) 39 * defines in the X server file (radeon_sarea.h)
diff --git a/include/uapi/linux/netconf.h b/include/uapi/linux/netconf.h
index 669a1f0b1d97..23cbd34e4ac7 100644
--- a/include/uapi/linux/netconf.h
+++ b/include/uapi/linux/netconf.h
@@ -15,6 +15,7 @@ enum {
15 NETCONFA_RP_FILTER, 15 NETCONFA_RP_FILTER,
16 NETCONFA_MC_FORWARDING, 16 NETCONFA_MC_FORWARDING,
17 NETCONFA_PROXY_NEIGH, 17 NETCONFA_PROXY_NEIGH,
18 NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
18 __NETCONFA_MAX 19 __NETCONFA_MAX
19}; 20};
20#define NETCONFA_MAX (__NETCONFA_MAX - 1) 21#define NETCONFA_MAX (__NETCONFA_MAX - 1)
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index efe3443572ba..413417f3707b 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -319,6 +319,7 @@
319#define PCI_MSIX_PBA 8 /* Pending Bit Array offset */ 319#define PCI_MSIX_PBA 8 /* Pending Bit Array offset */
320#define PCI_MSIX_PBA_BIR 0x00000007 /* BAR index */ 320#define PCI_MSIX_PBA_BIR 0x00000007 /* BAR index */
321#define PCI_MSIX_PBA_OFFSET 0xfffffff8 /* Offset into specified BAR */ 321#define PCI_MSIX_PBA_OFFSET 0xfffffff8 /* Offset into specified BAR */
322#define PCI_MSIX_FLAGS_BIRMASK PCI_MSIX_PBA_BIR /* deprecated */
322#define PCI_CAP_MSIX_SIZEOF 12 /* size of MSIX registers */ 323#define PCI_CAP_MSIX_SIZEOF 12 /* size of MSIX registers */
323 324
324/* MSI-X Table entry format */ 325/* MSI-X Table entry format */
diff --git a/include/uapi/linux/virtio_net.h b/include/uapi/linux/virtio_net.h
index 7bbee79ca293..ec32293a00db 100644
--- a/include/uapi/linux/virtio_net.h
+++ b/include/uapi/linux/virtio_net.h
@@ -34,6 +34,7 @@
34/* The feature bitmap for virtio net */ 34/* The feature bitmap for virtio net */
35#define VIRTIO_NET_F_CSUM 0 /* Host handles pkts w/ partial csum */ 35#define VIRTIO_NET_F_CSUM 0 /* Host handles pkts w/ partial csum */
36#define VIRTIO_NET_F_GUEST_CSUM 1 /* Guest handles pkts w/ partial csum */ 36#define VIRTIO_NET_F_GUEST_CSUM 1 /* Guest handles pkts w/ partial csum */
37#define VIRTIO_NET_F_CTRL_GUEST_OFFLOADS 2 /* Dynamic offload configuration. */
37#define VIRTIO_NET_F_MAC 5 /* Host has given MAC address. */ 38#define VIRTIO_NET_F_MAC 5 /* Host has given MAC address. */
38#define VIRTIO_NET_F_GUEST_TSO4 7 /* Guest can handle TSOv4 in. */ 39#define VIRTIO_NET_F_GUEST_TSO4 7 /* Guest can handle TSOv4 in. */
39#define VIRTIO_NET_F_GUEST_TSO6 8 /* Guest can handle TSOv6 in. */ 40#define VIRTIO_NET_F_GUEST_TSO6 8 /* Guest can handle TSOv6 in. */
@@ -226,4 +227,19 @@ struct virtio_net_ctrl_mq {
226 #define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN 1 227 #define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN 1
227 #define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX 0x8000 228 #define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX 0x8000
228 229
230/*
231 * Control network offloads
232 *
233 * Reconfigures the network offloads that Guest can handle.
234 *
235 * Available with the VIRTIO_NET_F_CTRL_GUEST_OFFLOADS feature bit.
236 *
237 * Command data format matches the feature bit mask exactly.
238 *
239 * See VIRTIO_NET_F_GUEST_* for the list of offloads
240 * that can be enabled/disabled.
241 */
242#define VIRTIO_NET_CTRL_GUEST_OFFLOADS 5
243#define VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET 0
244
229#endif /* _LINUX_VIRTIO_NET_H */ 245#endif /* _LINUX_VIRTIO_NET_H */
diff --git a/include/uapi/linux/virtio_pci.h b/include/uapi/linux/virtio_pci.h
index 75301468359f..90007a1abcab 100644
--- a/include/uapi/linux/virtio_pci.h
+++ b/include/uapi/linux/virtio_pci.h
@@ -157,6 +157,12 @@ struct virtio_pci_common_cfg {
157 __le32 queue_used_hi; /* read-write */ 157 __le32 queue_used_hi; /* read-write */
158}; 158};
159 159
160/* Fields in VIRTIO_PCI_CAP_PCI_CFG: */
161struct virtio_pci_cfg_cap {
162 struct virtio_pci_cap cap;
163 __u8 pci_cfg_data[4]; /* Data for BAR access. */
164};
165
160/* Macro versions of offsets for the Old Timers! */ 166/* Macro versions of offsets for the Old Timers! */
161#define VIRTIO_PCI_CAP_VNDR 0 167#define VIRTIO_PCI_CAP_VNDR 0
162#define VIRTIO_PCI_CAP_NEXT 1 168#define VIRTIO_PCI_CAP_NEXT 1
diff --git a/include/uapi/linux/virtio_ring.h b/include/uapi/linux/virtio_ring.h
index 915980ac68df..c07295969b7e 100644
--- a/include/uapi/linux/virtio_ring.h
+++ b/include/uapi/linux/virtio_ring.h
@@ -31,6 +31,9 @@
31 * SUCH DAMAGE. 31 * SUCH DAMAGE.
32 * 32 *
33 * Copyright Rusty Russell IBM Corporation 2007. */ 33 * Copyright Rusty Russell IBM Corporation 2007. */
34#ifndef __KERNEL__
35#include <stdint.h>
36#endif
34#include <linux/types.h> 37#include <linux/types.h>
35#include <linux/virtio_types.h> 38#include <linux/virtio_types.h>
36 39
@@ -143,7 +146,7 @@ static inline void vring_init(struct vring *vr, unsigned int num, void *p,
143 vr->num = num; 146 vr->num = num;
144 vr->desc = p; 147 vr->desc = p;
145 vr->avail = p + num*sizeof(struct vring_desc); 148 vr->avail = p + num*sizeof(struct vring_desc);
146 vr->used = (void *)(((unsigned long)&vr->avail->ring[num] + sizeof(__virtio16) 149 vr->used = (void *)(((uintptr_t)&vr->avail->ring[num] + sizeof(__virtio16)
147 + align-1) & ~(align - 1)); 150 + align-1) & ~(align - 1));
148} 151}
149 152
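vring_init() places the used ring at the first align-aligned address past the avail ring; casting through uintptr_t keeps that pointer arithmetic well defined when the header is built outside the kernel, which is why the non-__KERNEL__ stdint.h include is added. A stand-alone illustration of the same power-of-two round-up idiom (the example address is arbitrary):

#include <stdio.h>
#include <stdint.h>

static uintptr_t align_up(uintptr_t addr, uintptr_t align)
{
	/* align must be a power of two, as vring alignment is */
	return (addr + align - 1) & ~(align - 1);
}

int main(void)
{
	uintptr_t end_of_avail = 0x1003;	/* arbitrary example address */

	printf("0x%lx rounds up to 0x%lx with 4096-byte alignment\n",
	       (unsigned long)end_of_avail,
	       (unsigned long)align_up(end_of_avail, 4096));
	return 0;
}
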
diff --git a/include/uapi/sound/asoc.h b/include/uapi/sound/asoc.h
index 12215205ab8d..247c50bd60f0 100644
--- a/include/uapi/sound/asoc.h
+++ b/include/uapi/sound/asoc.h
@@ -18,6 +18,12 @@
18#include <linux/types.h> 18#include <linux/types.h>
19#include <sound/asound.h> 19#include <sound/asound.h>
20 20
21#ifndef __KERNEL__
22#error This API is an early revision and not enabled in the current
23#error kernel release, it will be enabled in a future kernel version
24#error with incompatible changes to what is here.
25#endif
26
21/* 27/*
22 * Maximum number of channels topology kcontrol can represent. 28 * Maximum number of channels topology kcontrol can represent.
23 */ 29 */
@@ -77,7 +83,7 @@
77#define SND_SOC_TPLG_NUM_TEXTS 16 83#define SND_SOC_TPLG_NUM_TEXTS 16
78 84
79/* ABI version */ 85/* ABI version */
80#define SND_SOC_TPLG_ABI_VERSION 0x2 86#define SND_SOC_TPLG_ABI_VERSION 0x3
81 87
82/* Max size of TLV data */ 88/* Max size of TLV data */
83#define SND_SOC_TPLG_TLV_SIZE 32 89#define SND_SOC_TPLG_TLV_SIZE 32
@@ -97,7 +103,8 @@
97#define SND_SOC_TPLG_TYPE_PCM 7 103#define SND_SOC_TPLG_TYPE_PCM 7
98#define SND_SOC_TPLG_TYPE_MANIFEST 8 104#define SND_SOC_TPLG_TYPE_MANIFEST 8
99#define SND_SOC_TPLG_TYPE_CODEC_LINK 9 105#define SND_SOC_TPLG_TYPE_CODEC_LINK 9
100#define SND_SOC_TPLG_TYPE_MAX SND_SOC_TPLG_TYPE_CODEC_LINK 106#define SND_SOC_TPLG_TYPE_PDATA 10
107#define SND_SOC_TPLG_TYPE_MAX SND_SOC_TPLG_TYPE_PDATA
101 108
102/* vendor block IDs - please add new vendor types to end */ 109/* vendor block IDs - please add new vendor types to end */
103#define SND_SOC_TPLG_TYPE_VENDOR_FW 1000 110#define SND_SOC_TPLG_TYPE_VENDOR_FW 1000
@@ -110,7 +117,7 @@
110 117
111/* 118/*
112 * Block Header. 119 * Block Header.
113 * This header preceeds all object and object arrays below. 120 * This header precedes all object and object arrays below.
114 */ 121 */
115struct snd_soc_tplg_hdr { 122struct snd_soc_tplg_hdr {
116 __le32 magic; /* magic number */ 123 __le32 magic; /* magic number */
@@ -137,11 +144,19 @@ struct snd_soc_tplg_private {
137/* 144/*
138 * Kcontrol TLV data. 145 * Kcontrol TLV data.
139 */ 146 */
147struct snd_soc_tplg_tlv_dbscale {
148 __le32 min;
149 __le32 step;
150 __le32 mute;
151} __attribute__((packed));
152
140struct snd_soc_tplg_ctl_tlv { 153struct snd_soc_tplg_ctl_tlv {
141 __le32 size; /* in bytes aligned to 4 */ 154 __le32 size; /* in bytes of this structure */
142 __le32 numid; /* control element numeric identification */ 155 __le32 type; /* SNDRV_CTL_TLVT_*, type of TLV */
143 __le32 count; /* number of elem in data array */ 156 union {
144 __le32 data[SND_SOC_TPLG_TLV_SIZE]; 157 __le32 data[SND_SOC_TPLG_TLV_SIZE];
158 struct snd_soc_tplg_tlv_dbscale scale;
159 };
145} __attribute__((packed)); 160} __attribute__((packed));
146 161
147/* 162/*
@@ -155,9 +170,11 @@ struct snd_soc_tplg_channel {
155} __attribute__((packed)); 170} __attribute__((packed));
156 171
157/* 172/*
158 * Kcontrol Operations IDs 173 * Generic Operations IDs, for binding Kcontrol or Bytes ext ops
174 * Kcontrol ops need get/put/info.
175 * Bytes ext ops need get/put.
159 */ 176 */
160struct snd_soc_tplg_kcontrol_ops_id { 177struct snd_soc_tplg_io_ops {
161 __le32 get; 178 __le32 get;
162 __le32 put; 179 __le32 put;
163 __le32 info; 180 __le32 info;
@@ -171,8 +188,8 @@ struct snd_soc_tplg_ctl_hdr {
171 __le32 type; 188 __le32 type;
172 char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN]; 189 char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
173 __le32 access; 190 __le32 access;
174 struct snd_soc_tplg_kcontrol_ops_id ops; 191 struct snd_soc_tplg_io_ops ops;
175 __le32 tlv_size; /* non zero means control has TLV data */ 192 struct snd_soc_tplg_ctl_tlv tlv;
176} __attribute__((packed)); 193} __attribute__((packed));
177 194
178/* 195/*
@@ -222,7 +239,7 @@ struct snd_soc_tplg_stream_config {
222/* 239/*
223 * Manifest. List totals for each payload type. Not used in parsing, but will 240 * Manifest. List totals for each payload type. Not used in parsing, but will
224 * be passed to the component driver before any other objects in order for any 241 * be passed to the component driver before any other objects in order for any
225 * global componnent resource allocations. 242 * global component resource allocations.
226 * 243 *
227 * File block representation for manifest :- 244 * File block representation for manifest :-
228 * +-----------------------------------+----+ 245 * +-----------------------------------+----+
@@ -238,6 +255,7 @@ struct snd_soc_tplg_manifest {
238 __le32 graph_elems; /* number of graph elements */ 255 __le32 graph_elems; /* number of graph elements */
239 __le32 dai_elems; /* number of DAI elements */ 256 __le32 dai_elems; /* number of DAI elements */
240 __le32 dai_link_elems; /* number of DAI link elements */ 257 __le32 dai_link_elems; /* number of DAI link elements */
258 struct snd_soc_tplg_private priv;
241} __attribute__((packed)); 259} __attribute__((packed));
242 260
243/* 261/*
@@ -259,7 +277,6 @@ struct snd_soc_tplg_mixer_control {
259 __le32 invert; 277 __le32 invert;
260 __le32 num_channels; 278 __le32 num_channels;
261 struct snd_soc_tplg_channel channel[SND_SOC_TPLG_MAX_CHAN]; 279 struct snd_soc_tplg_channel channel[SND_SOC_TPLG_MAX_CHAN];
262 struct snd_soc_tplg_ctl_tlv tlv;
263 struct snd_soc_tplg_private priv; 280 struct snd_soc_tplg_private priv;
264} __attribute__((packed)); 281} __attribute__((packed));
265 282
@@ -303,6 +320,7 @@ struct snd_soc_tplg_bytes_control {
303 __le32 mask; 320 __le32 mask;
304 __le32 base; 321 __le32 base;
305 __le32 num_regs; 322 __le32 num_regs;
323 struct snd_soc_tplg_io_ops ext_ops;
306 struct snd_soc_tplg_private priv; 324 struct snd_soc_tplg_private priv;
307} __attribute__((packed)); 325} __attribute__((packed));
308 326
@@ -347,6 +365,7 @@ struct snd_soc_tplg_dapm_widget {
347 __le32 reg; /* negative reg = no direct dapm */ 365 __le32 reg; /* negative reg = no direct dapm */
348 __le32 shift; /* bits to shift */ 366 __le32 shift; /* bits to shift */
349 __le32 mask; /* non-shifted mask */ 367 __le32 mask; /* non-shifted mask */
368 __le32 subseq; /* sort within widget type */
350 __u32 invert; /* invert the power bit */ 369 __u32 invert; /* invert the power bit */
351 __u32 ignore_suspend; /* kept enabled over suspend */ 370 __u32 ignore_suspend; /* kept enabled over suspend */
352 __u16 event_flags; 371 __u16 event_flags;
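The reworked TLV block now carries a type field and either raw words or a dB-scale triple in a union, and it is embedded directly in the control header instead of being a separate size-prefixed blob. A sketch of filling the dB-scale variant, with the structures mirrored locally as plain uint32_t (the real header uses __le32) and the SNDRV_CTL_TLVT_DB_SCALE value stood in by a literal:

#include <stdio.h>
#include <stdint.h>

#define SND_SOC_TPLG_TLV_SIZE 32

/* Local mirror of the new layout; for illustration only. */
struct tplg_tlv_dbscale {
	uint32_t min;
	uint32_t step;
	uint32_t mute;
} __attribute__((packed));

struct tplg_ctl_tlv {
	uint32_t size;	/* in bytes of this structure */
	uint32_t type;	/* SNDRV_CTL_TLVT_*, type of TLV */
	union {
		uint32_t data[SND_SOC_TPLG_TLV_SIZE];
		struct tplg_tlv_dbscale scale;
	};
} __attribute__((packed));

int main(void)
{
	/* A -64.00 dB .. 0 dB scale in 0.50 dB steps, with mute. */
	struct tplg_ctl_tlv tlv = {
		.size = sizeof(tlv),
		.type = 1,	/* stands in for SNDRV_CTL_TLVT_DB_SCALE */
		.scale = { .min = (uint32_t)-6400, .step = 50, .mute = 1 },
	};

	printf("tlv size %u, dbscale min %d step %u mute %u\n",
	       tlv.size, (int32_t)tlv.scale.min, tlv.scale.step, tlv.scale.mute);
	return 0;
}
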
diff --git a/init/main.c b/init/main.c
index c5d5626289ce..56506553d4d8 100644
--- a/init/main.c
+++ b/init/main.c
@@ -656,7 +656,7 @@ asmlinkage __visible void __init start_kernel(void)
656 key_init(); 656 key_init();
657 security_init(); 657 security_init();
658 dbg_late_init(); 658 dbg_late_init();
659 vfs_caches_init(totalram_pages); 659 vfs_caches_init();
660 signals_init(); 660 signals_init();
661 /* rootfs populating might need page-writeback */ 661 /* rootfs populating might need page-writeback */
662 page_writeback_init(); 662 page_writeback_init();
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index a24ba9fe5bb8..161a1807e6ef 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -142,7 +142,6 @@ static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info)
142 if (!leaf) 142 if (!leaf)
143 return -ENOMEM; 143 return -ENOMEM;
144 INIT_LIST_HEAD(&leaf->msg_list); 144 INIT_LIST_HEAD(&leaf->msg_list);
145 info->qsize += sizeof(*leaf);
146 } 145 }
147 leaf->priority = msg->m_type; 146 leaf->priority = msg->m_type;
148 rb_link_node(&leaf->rb_node, parent, p); 147 rb_link_node(&leaf->rb_node, parent, p);
@@ -187,7 +186,6 @@ try_again:
187 "lazy leaf delete!\n"); 186 "lazy leaf delete!\n");
188 rb_erase(&leaf->rb_node, &info->msg_tree); 187 rb_erase(&leaf->rb_node, &info->msg_tree);
189 if (info->node_cache) { 188 if (info->node_cache) {
190 info->qsize -= sizeof(*leaf);
191 kfree(leaf); 189 kfree(leaf);
192 } else { 190 } else {
193 info->node_cache = leaf; 191 info->node_cache = leaf;
@@ -200,7 +198,6 @@ try_again:
200 if (list_empty(&leaf->msg_list)) { 198 if (list_empty(&leaf->msg_list)) {
201 rb_erase(&leaf->rb_node, &info->msg_tree); 199 rb_erase(&leaf->rb_node, &info->msg_tree);
202 if (info->node_cache) { 200 if (info->node_cache) {
203 info->qsize -= sizeof(*leaf);
204 kfree(leaf); 201 kfree(leaf);
205 } else { 202 } else {
206 info->node_cache = leaf; 203 info->node_cache = leaf;
@@ -1034,7 +1031,6 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
1034 /* Save our speculative allocation into the cache */ 1031 /* Save our speculative allocation into the cache */
1035 INIT_LIST_HEAD(&new_leaf->msg_list); 1032 INIT_LIST_HEAD(&new_leaf->msg_list);
1036 info->node_cache = new_leaf; 1033 info->node_cache = new_leaf;
1037 info->qsize += sizeof(*new_leaf);
1038 new_leaf = NULL; 1034 new_leaf = NULL;
1039 } else { 1035 } else {
1040 kfree(new_leaf); 1036 kfree(new_leaf);
@@ -1142,7 +1138,6 @@ SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
1142 /* Save our speculative allocation into the cache */ 1138 /* Save our speculative allocation into the cache */
1143 INIT_LIST_HEAD(&new_leaf->msg_list); 1139 INIT_LIST_HEAD(&new_leaf->msg_list);
1144 info->node_cache = new_leaf; 1140 info->node_cache = new_leaf;
1145 info->qsize += sizeof(*new_leaf);
1146 } else { 1141 } else {
1147 kfree(new_leaf); 1142 kfree(new_leaf);
1148 } 1143 }
diff --git a/ipc/sem.c b/ipc/sem.c
index bc3d530cb23e..b471e5a3863d 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -253,6 +253,16 @@ static void sem_rcu_free(struct rcu_head *head)
253} 253}
254 254
255/* 255/*
256 * spin_unlock_wait() and !spin_is_locked() are not memory barriers, they
257 * are only control barriers.
258 * The code must pair with spin_unlock(&sem->lock) or
259 * spin_unlock(&sem_perm.lock), thus just the control barrier is insufficient.
260 *
261 * smp_rmb() is sufficient, as writes cannot pass the control barrier.
262 */
263#define ipc_smp_acquire__after_spin_is_unlocked() smp_rmb()
264
265/*
256 * Wait until all currently ongoing simple ops have completed. 266 * Wait until all currently ongoing simple ops have completed.
257 * Caller must own sem_perm.lock. 267 * Caller must own sem_perm.lock.
258 * New simple ops cannot start, because simple ops first check 268 * New simple ops cannot start, because simple ops first check
@@ -275,6 +285,7 @@ static void sem_wait_array(struct sem_array *sma)
275 sem = sma->sem_base + i; 285 sem = sma->sem_base + i;
276 spin_unlock_wait(&sem->lock); 286 spin_unlock_wait(&sem->lock);
277 } 287 }
288 ipc_smp_acquire__after_spin_is_unlocked();
278} 289}
279 290
280/* 291/*
@@ -327,13 +338,12 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
327 /* Then check that the global lock is free */ 338 /* Then check that the global lock is free */
328 if (!spin_is_locked(&sma->sem_perm.lock)) { 339 if (!spin_is_locked(&sma->sem_perm.lock)) {
329 /* 340 /*
330 * The ipc object lock check must be visible on all 341 * We need a memory barrier with acquire semantics,
331 * cores before rechecking the complex count. Otherwise 342 * otherwise we can race with another thread that does:
332 * we can race with another thread that does:
333 * complex_count++; 343 * complex_count++;
334 * spin_unlock(sem_perm.lock); 344 * spin_unlock(sem_perm.lock);
335 */ 345 */
336 smp_rmb(); 346 ipc_smp_acquire__after_spin_is_unlocked();
337 347
338 /* 348 /*
339 * Now repeat the test of complex_count: 349 * Now repeat the test of complex_count:
@@ -2074,17 +2084,28 @@ void exit_sem(struct task_struct *tsk)
2074 rcu_read_lock(); 2084 rcu_read_lock();
2075 un = list_entry_rcu(ulp->list_proc.next, 2085 un = list_entry_rcu(ulp->list_proc.next,
2076 struct sem_undo, list_proc); 2086 struct sem_undo, list_proc);
2077 if (&un->list_proc == &ulp->list_proc) 2087 if (&un->list_proc == &ulp->list_proc) {
2078 semid = -1; 2088 /*
2079 else 2089 * We must wait for freeary() before freeing this ulp,
2080 semid = un->semid; 2090 * in case we raced with last sem_undo. There is a small
2091 * possibility where we exit while freeary() didn't
2092 * finish unlocking sem_undo_list.
2093 */
2094 spin_unlock_wait(&ulp->lock);
2095 rcu_read_unlock();
2096 break;
2097 }
2098 spin_lock(&ulp->lock);
2099 semid = un->semid;
2100 spin_unlock(&ulp->lock);
2081 2101
2102 /* exit_sem raced with IPC_RMID, nothing to do */
2082 if (semid == -1) { 2103 if (semid == -1) {
2083 rcu_read_unlock(); 2104 rcu_read_unlock();
2084 break; 2105 continue;
2085 } 2106 }
2086 2107
2087 sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, un->semid); 2108 sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid);
2088 /* exit_sem raced with IPC_RMID, nothing to do */ 2109 /* exit_sem raced with IPC_RMID, nothing to do */
2089 if (IS_ERR(sma)) { 2110 if (IS_ERR(sma)) {
2090 rcu_read_unlock(); 2111 rcu_read_unlock();
@@ -2112,9 +2133,11 @@ void exit_sem(struct task_struct *tsk)
2112 ipc_assert_locked_object(&sma->sem_perm); 2133 ipc_assert_locked_object(&sma->sem_perm);
2113 list_del(&un->list_id); 2134 list_del(&un->list_id);
2114 2135
2115 spin_lock(&ulp->lock); 2136 /* we are the last process using this ulp, acquiring ulp->lock
2137 * isn't required. Besides that, we are also protected against
2138 * IPC_RMID as we hold sma->sem_perm lock now
2139 */
2116 list_del_rcu(&un->list_proc); 2140 list_del_rcu(&un->list_proc);
2117 spin_unlock(&ulp->lock);
2118 2141
2119 /* perform adjustments registered in un */ 2142 /* perform adjustments registered in un */
2120 for (i = 0; i < sma->sem_nsems; i++) { 2143 for (i = 0; i < sma->sem_nsems; i++) {
diff --git a/ipc/shm.c b/ipc/shm.c
index 06e5cf2fe019..4aef24d91b63 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -545,7 +545,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
545 if ((shmflg & SHM_NORESERVE) && 545 if ((shmflg & SHM_NORESERVE) &&
546 sysctl_overcommit_memory != OVERCOMMIT_NEVER) 546 sysctl_overcommit_memory != OVERCOMMIT_NEVER)
547 acctflag = VM_NORESERVE; 547 acctflag = VM_NORESERVE;
548 file = shmem_file_setup(name, size, acctflag); 548 file = shmem_kernel_file_setup(name, size, acctflag);
549 } 549 }
550 error = PTR_ERR(file); 550 error = PTR_ERR(file);
551 if (IS_ERR(file)) 551 if (IS_ERR(file))
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 6a374544d495..5644ec5582b9 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -527,18 +527,9 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen)
527 goto out_notify; 527 goto out_notify;
528 } 528 }
529 529
530 /*
531 * Some architectures have to walk the irq descriptors to
532 * setup the vector space for the cpu which comes online.
533 * Prevent irq alloc/free across the bringup.
534 */
535 irq_lock_sparse();
536
537 /* Arch-specific enabling code. */ 530 /* Arch-specific enabling code. */
538 ret = __cpu_up(cpu, idle); 531 ret = __cpu_up(cpu, idle);
539 532
540 irq_unlock_sparse();
541
542 if (ret != 0) 533 if (ret != 0)
543 goto out_notify; 534 goto out_notify;
544 BUG_ON(!cpu_online(cpu)); 535 BUG_ON(!cpu_online(cpu));
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index ee14e3a35a29..f0acff0f66c9 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1223,7 +1223,7 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
1223 spin_unlock_irq(&callback_lock); 1223 spin_unlock_irq(&callback_lock);
1224 1224
1225 /* use trialcs->mems_allowed as a temp variable */ 1225 /* use trialcs->mems_allowed as a temp variable */
1226 update_nodemasks_hier(cs, &cs->mems_allowed); 1226 update_nodemasks_hier(cs, &trialcs->mems_allowed);
1227done: 1227done:
1228 return retval; 1228 return retval;
1229} 1229}
diff --git a/kernel/events/core.c b/kernel/events/core.c
index d3dae3419b99..e6feb5114134 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1868,8 +1868,6 @@ event_sched_in(struct perf_event *event,
1868 1868
1869 perf_pmu_disable(event->pmu); 1869 perf_pmu_disable(event->pmu);
1870 1870
1871 event->tstamp_running += tstamp - event->tstamp_stopped;
1872
1873 perf_set_shadow_time(event, ctx, tstamp); 1871 perf_set_shadow_time(event, ctx, tstamp);
1874 1872
1875 perf_log_itrace_start(event); 1873 perf_log_itrace_start(event);
@@ -1881,6 +1879,8 @@ event_sched_in(struct perf_event *event,
1881 goto out; 1879 goto out;
1882 } 1880 }
1883 1881
1882 event->tstamp_running += tstamp - event->tstamp_stopped;
1883
1884 if (!is_software_event(event)) 1884 if (!is_software_event(event))
1885 cpuctx->active_oncpu++; 1885 cpuctx->active_oncpu++;
1886 if (!ctx->nr_active++) 1886 if (!ctx->nr_active++)
@@ -3958,28 +3958,21 @@ static void perf_event_for_each(struct perf_event *event,
3958 perf_event_for_each_child(sibling, func); 3958 perf_event_for_each_child(sibling, func);
3959} 3959}
3960 3960
3961static int perf_event_period(struct perf_event *event, u64 __user *arg) 3961struct period_event {
3962{ 3962 struct perf_event *event;
3963 struct perf_event_context *ctx = event->ctx;
3964 int ret = 0, active;
3965 u64 value; 3963 u64 value;
3964};
3966 3965
3967 if (!is_sampling_event(event)) 3966static int __perf_event_period(void *info)
3968 return -EINVAL; 3967{
3969 3968 struct period_event *pe = info;
3970 if (copy_from_user(&value, arg, sizeof(value))) 3969 struct perf_event *event = pe->event;
3971 return -EFAULT; 3970 struct perf_event_context *ctx = event->ctx;
3972 3971 u64 value = pe->value;
3973 if (!value) 3972 bool active;
3974 return -EINVAL;
3975 3973
3976 raw_spin_lock_irq(&ctx->lock); 3974 raw_spin_lock(&ctx->lock);
3977 if (event->attr.freq) { 3975 if (event->attr.freq) {
3978 if (value > sysctl_perf_event_sample_rate) {
3979 ret = -EINVAL;
3980 goto unlock;
3981 }
3982
3983 event->attr.sample_freq = value; 3976 event->attr.sample_freq = value;
3984 } else { 3977 } else {
3985 event->attr.sample_period = value; 3978 event->attr.sample_period = value;
@@ -3998,11 +3991,53 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
3998 event->pmu->start(event, PERF_EF_RELOAD); 3991 event->pmu->start(event, PERF_EF_RELOAD);
3999 perf_pmu_enable(ctx->pmu); 3992 perf_pmu_enable(ctx->pmu);
4000 } 3993 }
3994 raw_spin_unlock(&ctx->lock);
4001 3995
4002unlock: 3996 return 0;
3997}
3998
3999static int perf_event_period(struct perf_event *event, u64 __user *arg)
4000{
4001 struct period_event pe = { .event = event, };
4002 struct perf_event_context *ctx = event->ctx;
4003 struct task_struct *task;
4004 u64 value;
4005
4006 if (!is_sampling_event(event))
4007 return -EINVAL;
4008
4009 if (copy_from_user(&value, arg, sizeof(value)))
4010 return -EFAULT;
4011
4012 if (!value)
4013 return -EINVAL;
4014
4015 if (event->attr.freq && value > sysctl_perf_event_sample_rate)
4016 return -EINVAL;
4017
4018 task = ctx->task;
4019 pe.value = value;
4020
4021 if (!task) {
4022 cpu_function_call(event->cpu, __perf_event_period, &pe);
4023 return 0;
4024 }
4025
4026retry:
4027 if (!task_function_call(task, __perf_event_period, &pe))
4028 return 0;
4029
4030 raw_spin_lock_irq(&ctx->lock);
4031 if (ctx->is_active) {
4032 raw_spin_unlock_irq(&ctx->lock);
4033 task = ctx->task;
4034 goto retry;
4035 }
4036
4037 __perf_event_period(&pe);
4003 raw_spin_unlock_irq(&ctx->lock); 4038 raw_spin_unlock_irq(&ctx->lock);
4004 4039
4005 return ret; 4040 return 0;
4006} 4041}
4007 4042
4008static const struct file_operations perf_fops; 4043static const struct file_operations perf_fops;
@@ -4740,12 +4775,20 @@ static const struct file_operations perf_fops = {
4740 * to user-space before waking everybody up. 4775 * to user-space before waking everybody up.
4741 */ 4776 */
4742 4777
4778static inline struct fasync_struct **perf_event_fasync(struct perf_event *event)
4779{
4780 /* only the parent has fasync state */
4781 if (event->parent)
4782 event = event->parent;
4783 return &event->fasync;
4784}
4785
4743void perf_event_wakeup(struct perf_event *event) 4786void perf_event_wakeup(struct perf_event *event)
4744{ 4787{
4745 ring_buffer_wakeup(event); 4788 ring_buffer_wakeup(event);
4746 4789
4747 if (event->pending_kill) { 4790 if (event->pending_kill) {
4748 kill_fasync(&event->fasync, SIGIO, event->pending_kill); 4791 kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill);
4749 event->pending_kill = 0; 4792 event->pending_kill = 0;
4750 } 4793 }
4751} 4794}
@@ -6124,7 +6167,7 @@ static int __perf_event_overflow(struct perf_event *event,
6124 else 6167 else
6125 perf_event_output(event, data, regs); 6168 perf_event_output(event, data, regs);
6126 6169
6127 if (event->fasync && event->pending_kill) { 6170 if (*perf_event_fasync(event) && event->pending_kill) {
6128 event->pending_wakeup = 1; 6171 event->pending_wakeup = 1;
6129 irq_work_queue(&event->pending); 6172 irq_work_queue(&event->pending);
6130 } 6173 }
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index b2be01b1aa9d..c8aa3f75bc4d 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -559,11 +559,13 @@ static void __rb_free_aux(struct ring_buffer *rb)
559 rb->aux_priv = NULL; 559 rb->aux_priv = NULL;
560 } 560 }
561 561
562 for (pg = 0; pg < rb->aux_nr_pages; pg++) 562 if (rb->aux_nr_pages) {
563 rb_free_aux_page(rb, pg); 563 for (pg = 0; pg < rb->aux_nr_pages; pg++)
564 rb_free_aux_page(rb, pg);
564 565
565 kfree(rb->aux_pages); 566 kfree(rb->aux_pages);
566 rb->aux_nr_pages = 0; 567 rb->aux_nr_pages = 0;
568 }
567} 569}
568 570
569void rb_free_aux(struct ring_buffer *rb) 571void rb_free_aux(struct ring_buffer *rb)
diff --git a/kernel/fork.c b/kernel/fork.c
index 1bfefc6f96a4..dbd9b8d7b7cc 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -287,6 +287,11 @@ static void set_max_threads(unsigned int max_threads_suggested)
287 max_threads = clamp_t(u64, threads, MIN_THREADS, MAX_THREADS); 287 max_threads = clamp_t(u64, threads, MIN_THREADS, MAX_THREADS);
288} 288}
289 289
290#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
291/* Initialized by the architecture: */
292int arch_task_struct_size __read_mostly;
293#endif
294
290void __init fork_init(void) 295void __init fork_init(void)
291{ 296{
292#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR 297#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
@@ -295,7 +300,7 @@ void __init fork_init(void)
295#endif 300#endif
296 /* create a slab on which task_structs can be allocated */ 301 /* create a slab on which task_structs can be allocated */
297 task_struct_cachep = 302 task_struct_cachep =
298 kmem_cache_create("task_struct", sizeof(struct task_struct), 303 kmem_cache_create("task_struct", arch_task_struct_size,
299 ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL); 304 ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
300#endif 305#endif
301 306
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 27f4332c7f84..ae216824e8ca 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -985,6 +985,23 @@ int irq_chip_set_affinity_parent(struct irq_data *data,
985} 985}
986 986
987/** 987/**
988 * irq_chip_set_type_parent - Set IRQ type on the parent interrupt
989 * @data: Pointer to interrupt specific data
990 * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
991 *
992 * Conditional, as the underlying parent chip might not implement it.
993 */
994int irq_chip_set_type_parent(struct irq_data *data, unsigned int type)
995{
996 data = data->parent_data;
997
998 if (data->chip->irq_set_type)
999 return data->chip->irq_set_type(data, type);
1000
1001 return -ENOSYS;
1002}
1003
1004/**
988 * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware 1005 * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware
989 * @data: Pointer to interrupt specific data 1006 * @data: Pointer to interrupt specific data
990 * 1007 *
@@ -997,7 +1014,7 @@ int irq_chip_retrigger_hierarchy(struct irq_data *data)
997 if (data->chip && data->chip->irq_retrigger) 1014 if (data->chip && data->chip->irq_retrigger)
998 return data->chip->irq_retrigger(data); 1015 return data->chip->irq_retrigger(data);
999 1016
1000 return -ENOSYS; 1017 return 0;
1001} 1018}
1002 1019
1003/** 1020/**
diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c
index 9065107f083e..7a5237a1bce5 100644
--- a/kernel/irq/resend.c
+++ b/kernel/irq/resend.c
@@ -75,13 +75,21 @@ void check_irq_resend(struct irq_desc *desc, unsigned int irq)
75 !desc->irq_data.chip->irq_retrigger(&desc->irq_data)) { 75 !desc->irq_data.chip->irq_retrigger(&desc->irq_data)) {
76#ifdef CONFIG_HARDIRQS_SW_RESEND 76#ifdef CONFIG_HARDIRQS_SW_RESEND
77 /* 77 /*
78 * If the interrupt has a parent irq and runs 78 * If the interrupt is running in the thread
79 * in the thread context of the parent irq, 79 * context of the parent irq we need to be
80 * retrigger the parent. 80 * careful, because we cannot trigger it
81 * directly.
81 */ 82 */
82 if (desc->parent_irq && 83 if (irq_settings_is_nested_thread(desc)) {
83 irq_settings_is_nested_thread(desc)) 84 /*
85 * If the parent_irq is valid, we
86 * retrigger the parent, otherwise we
87 * do nothing.
88 */
89 if (!desc->parent_irq)
90 return;
84 irq = desc->parent_irq; 91 irq = desc->parent_irq;
92 }
85 /* Set it pending and activate the softirq: */ 93 /* Set it pending and activate the softirq: */
86 set_bit(irq, irqs_resend); 94 set_bit(irq, irqs_resend);
87 tasklet_schedule(&resend_tasklet); 95 tasklet_schedule(&resend_tasklet);
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 10e489c448fe..fdea0bee7b5a 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -97,6 +97,7 @@ bool kthread_should_park(void)
97{ 97{
98 return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags); 98 return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags);
99} 99}
100EXPORT_SYMBOL_GPL(kthread_should_park);
100 101
101/** 102/**
102 * kthread_freezable_should_stop - should this freezable kthread return now? 103 * kthread_freezable_should_stop - should this freezable kthread return now?
@@ -171,6 +172,7 @@ void kthread_parkme(void)
171{ 172{
172 __kthread_parkme(to_kthread(current)); 173 __kthread_parkme(to_kthread(current));
173} 174}
175EXPORT_SYMBOL_GPL(kthread_parkme);
174 176
175static int kthread(void *_create) 177static int kthread(void *_create)
176{ 178{
@@ -411,6 +413,7 @@ void kthread_unpark(struct task_struct *k)
411 if (kthread) 413 if (kthread)
412 __kthread_unpark(k, kthread); 414 __kthread_unpark(k, kthread);
413} 415}
416EXPORT_SYMBOL_GPL(kthread_unpark);
414 417
415/** 418/**
416 * kthread_park - park a thread created by kthread_create(). 419 * kthread_park - park a thread created by kthread_create().
@@ -441,6 +444,7 @@ int kthread_park(struct task_struct *k)
441 } 444 }
442 return ret; 445 return ret;
443} 446}
447EXPORT_SYMBOL_GPL(kthread_park);
444 448
445/** 449/**
446 * kthread_stop - stop a thread created by kthread_create(). 450 * kthread_stop - stop a thread created by kthread_create().
diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
index 04ab18151cc8..df19ae4debd0 100644
--- a/kernel/locking/qspinlock_paravirt.h
+++ b/kernel/locking/qspinlock_paravirt.h
@@ -4,6 +4,7 @@
4 4
5#include <linux/hash.h> 5#include <linux/hash.h>
6#include <linux/bootmem.h> 6#include <linux/bootmem.h>
7#include <linux/debug_locks.h>
7 8
8/* 9/*
9 * Implement paravirt qspinlocks; the general idea is to halt the vcpus instead 10 * Implement paravirt qspinlocks; the general idea is to halt the vcpus instead
@@ -286,15 +287,23 @@ __visible void __pv_queued_spin_unlock(struct qspinlock *lock)
286{ 287{
287 struct __qspinlock *l = (void *)lock; 288 struct __qspinlock *l = (void *)lock;
288 struct pv_node *node; 289 struct pv_node *node;
290 u8 lockval = cmpxchg(&l->locked, _Q_LOCKED_VAL, 0);
289 291
290 /* 292 /*
291 * We must not unlock if SLOW, because in that case we must first 293 * We must not unlock if SLOW, because in that case we must first
292 * unhash. Otherwise it would be possible to have multiple @lock 294 * unhash. Otherwise it would be possible to have multiple @lock
293 * entries, which would be BAD. 295 * entries, which would be BAD.
294 */ 296 */
295 if (likely(cmpxchg(&l->locked, _Q_LOCKED_VAL, 0) == _Q_LOCKED_VAL)) 297 if (likely(lockval == _Q_LOCKED_VAL))
296 return; 298 return;
297 299
300 if (unlikely(lockval != _Q_SLOW_VAL)) {
301 if (debug_locks_silent)
302 return;
303 WARN(1, "pvqspinlock: lock %p has corrupted value 0x%x!\n", lock, atomic_read(&lock->val));
304 return;
305 }
306
298 /* 307 /*
299 * Since the above failed to release, this must be the SLOW path. 308 * Since the above failed to release, this must be the SLOW path.
300 * Therefore start by looking up the blocked node and unhashing it. 309 * Therefore start by looking up the blocked node and unhashing it.
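The unlock fast path is now a single cmpxchg that also captures the observed lock byte, so a value other than locked or slow can be reported instead of silently mishandled. A rough user-space analogue of that pattern with C11 atomics; the lock values and the slow-path body are stand-ins, not the kernel's:

#include <stdio.h>
#include <stdint.h>
#include <stdatomic.h>

#define LOCKED_VAL 1u	/* stand-in for _Q_LOCKED_VAL */
#define SLOW_VAL   3u	/* stand-in for _Q_SLOW_VAL */

static _Atomic uint8_t locked = LOCKED_VAL;

static void unlock_slowpath(void)
{
	/* stand-in for unhashing the waiter node and kicking its vcpu */
	atomic_store(&locked, 0);
	puts("slow path: woke a waiter");
}

int main(void)
{
	uint8_t expect = LOCKED_VAL;

	/* fast path: flip LOCKED -> 0 in one compare-exchange */
	if (atomic_compare_exchange_strong(&locked, &expect, 0)) {
		puts("fast path: unlocked");
		return 0;
	}

	if (expect != SLOW_VAL) {
		fprintf(stderr, "corrupted lock value 0x%x\n", (unsigned)expect);
		return 1;
	}

	unlock_slowpath();
	return 0;
}
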
diff --git a/kernel/module.c b/kernel/module.c
index 4d2b82e610e2..b86b7bf1be38 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -602,13 +602,16 @@ const struct kernel_symbol *find_symbol(const char *name,
602} 602}
603EXPORT_SYMBOL_GPL(find_symbol); 603EXPORT_SYMBOL_GPL(find_symbol);
604 604
605/* Search for module by name: must hold module_mutex. */ 605/*
606 * Search for module by name: must hold module_mutex (or preempt disabled
607 * for read-only access).
608 */
606static struct module *find_module_all(const char *name, size_t len, 609static struct module *find_module_all(const char *name, size_t len,
607 bool even_unformed) 610 bool even_unformed)
608{ 611{
609 struct module *mod; 612 struct module *mod;
610 613
611 module_assert_mutex(); 614 module_assert_mutex_or_preempt();
612 615
613 list_for_each_entry(mod, &modules, list) { 616 list_for_each_entry(mod, &modules, list) {
614 if (!even_unformed && mod->state == MODULE_STATE_UNFORMED) 617 if (!even_unformed && mod->state == MODULE_STATE_UNFORMED)
@@ -621,6 +624,7 @@ static struct module *find_module_all(const char *name, size_t len,
621 624
622struct module *find_module(const char *name) 625struct module *find_module(const char *name)
623{ 626{
627 module_assert_mutex();
624 return find_module_all(name, strlen(name), false); 628 return find_module_all(name, strlen(name), false);
625} 629}
626EXPORT_SYMBOL_GPL(find_module); 630EXPORT_SYMBOL_GPL(find_module);
diff --git a/kernel/resource.c b/kernel/resource.c
index 90552aab5f2d..fed052a1bc9f 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -504,13 +504,13 @@ int region_is_ram(resource_size_t start, unsigned long size)
504{ 504{
505 struct resource *p; 505 struct resource *p;
506 resource_size_t end = start + size - 1; 506 resource_size_t end = start + size - 1;
507 int flags = IORESOURCE_MEM | IORESOURCE_BUSY; 507 unsigned long flags = IORESOURCE_MEM | IORESOURCE_BUSY;
508 const char *name = "System RAM"; 508 const char *name = "System RAM";
509 int ret = -1; 509 int ret = -1;
510 510
511 read_lock(&resource_lock); 511 read_lock(&resource_lock);
512 for (p = iomem_resource.child; p ; p = p->sibling) { 512 for (p = iomem_resource.child; p ; p = p->sibling) {
513 if (end < p->start) 513 if (p->end < start)
514 continue; 514 continue;
515 515
516 if (p->start <= start && end <= p->end) { 516 if (p->start <= start && end <= p->end) {
@@ -521,7 +521,7 @@ int region_is_ram(resource_size_t start, unsigned long size)
521 ret = 1; 521 ret = 1;
522 break; 522 break;
523 } 523 }
524 if (p->end < start) 524 if (end < p->start)
525 break; /* not found */ 525 break; /* not found */
526 } 526 }
527 read_unlock(&resource_lock); 527 read_unlock(&resource_lock);
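The two comparisons were swapped: while walking the sorted resource list, an entry that ends before the region of interest must be skipped, and the walk can stop once an entry starts beyond it. A stand-alone version of the corrected walk over a made-up, sorted interval list:

#include <stdio.h>
#include <string.h>

struct range { unsigned long start, end; const char *name; };

/* sorted and non-overlapping, like the children of iomem_resource */
static const struct range res[] = {
	{ 0x00000, 0x0ffff, "reserved"   },
	{ 0x10000, 0x7ffff, "System RAM" },
	{ 0x80000, 0x9ffff, "PCI window" },
};

static int region_is(const char *name, unsigned long start, unsigned long end)
{
	for (unsigned int i = 0; i < sizeof(res) / sizeof(res[0]); i++) {
		if (res[i].end < start)
			continue;	/* entirely below the region: keep going */
		if (res[i].start <= start && end <= res[i].end)
			return strcmp(res[i].name, name) == 0;
		if (end < res[i].start)
			break;		/* sorted: nothing later can match */
	}
	return 0;
}

int main(void)
{
	printf("0x20000-0x2ffff is RAM? %d\n",
	       region_is("System RAM", 0x20000, 0x2ffff));
	printf("0x90000-0x9ffff is RAM? %d\n",
	       region_is("System RAM", 0x90000, 0x9ffff));
	return 0;
}
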
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 65c8f3ebdc3c..d113c3ba8bc4 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3683,7 +3683,7 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
3683 cfs_rq->throttled = 1; 3683 cfs_rq->throttled = 1;
3684 cfs_rq->throttled_clock = rq_clock(rq); 3684 cfs_rq->throttled_clock = rq_clock(rq);
3685 raw_spin_lock(&cfs_b->lock); 3685 raw_spin_lock(&cfs_b->lock);
3686 empty = list_empty(&cfs_rq->throttled_list); 3686 empty = list_empty(&cfs_b->throttled_cfs_rq);
3687 3687
3688 /* 3688 /*
3689 * Add to the _head_ of the list, so that an already-started 3689 * Add to the _head_ of the list, so that an already-started
diff --git a/kernel/signal.c b/kernel/signal.c
index 836df8dac6cc..0f6bbbe77b46 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2748,12 +2748,15 @@ int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
2748 * Other callers might not initialize the si_lsb field, 2748 * Other callers might not initialize the si_lsb field,
2749 * so check explicitly for the right codes here. 2749 * so check explicitly for the right codes here.
2750 */ 2750 */
2751 if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO) 2751 if (from->si_signo == SIGBUS &&
2752 (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
2752 err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb); 2753 err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
2753#endif 2754#endif
2754#ifdef SEGV_BNDERR 2755#ifdef SEGV_BNDERR
2755 err |= __put_user(from->si_lower, &to->si_lower); 2756 if (from->si_signo == SIGSEGV && from->si_code == SEGV_BNDERR) {
2756 err |= __put_user(from->si_upper, &to->si_upper); 2757 err |= __put_user(from->si_lower, &to->si_lower);
2758 err |= __put_user(from->si_upper, &to->si_upper);
2759 }
2757#endif 2760#endif
2758 break; 2761 break;
2759 case __SI_CHLD: 2762 case __SI_CHLD:
@@ -3017,7 +3020,7 @@ COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
3017 int, sig, 3020 int, sig,
3018 struct compat_siginfo __user *, uinfo) 3021 struct compat_siginfo __user *, uinfo)
3019{ 3022{
3020 siginfo_t info; 3023 siginfo_t info = {};
3021 int ret = copy_siginfo_from_user32(&info, uinfo); 3024 int ret = copy_siginfo_from_user32(&info, uinfo);
3022 if (unlikely(ret)) 3025 if (unlikely(ret))
3023 return ret; 3026 return ret;
@@ -3061,7 +3064,7 @@ COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
3061 int, sig, 3064 int, sig,
3062 struct compat_siginfo __user *, uinfo) 3065 struct compat_siginfo __user *, uinfo)
3063{ 3066{
3064 siginfo_t info; 3067 siginfo_t info = {};
3065 3068
3066 if (copy_siginfo_from_user32(&info, uinfo)) 3069 if (copy_siginfo_from_user32(&info, uinfo))
3067 return -EFAULT; 3070 return -EFAULT;
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 52b9e199b5ac..f6aae7977824 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -839,7 +839,6 @@ out:
839 raw_spin_unlock(&tick_broadcast_lock); 839 raw_spin_unlock(&tick_broadcast_lock);
840 return ret; 840 return ret;
841} 841}
842EXPORT_SYMBOL_GPL(tick_broadcast_oneshot_control);
843 842
844/* 843/*
845 * Reset the one shot broadcast for a cpu 844 * Reset the one shot broadcast for a cpu
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 55e13efff1ab..f8bf47571dda 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -363,6 +363,7 @@ int tick_broadcast_oneshot_control(enum tick_broadcast_state state)
363 363
364 return __tick_broadcast_oneshot_control(state); 364 return __tick_broadcast_oneshot_control(state);
365} 365}
366EXPORT_SYMBOL_GPL(tick_broadcast_oneshot_control);
366 367
367#ifdef CONFIG_HOTPLUG_CPU 368#ifdef CONFIG_HOTPLUG_CPU
368/* 369/*
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 5e097fa9faf7..84190f02b521 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -807,8 +807,8 @@ __mod_timer(struct timer_list *timer, unsigned long expires,
807 spin_unlock(&base->lock); 807 spin_unlock(&base->lock);
808 base = new_base; 808 base = new_base;
809 spin_lock(&base->lock); 809 spin_lock(&base->lock);
810 timer->flags &= ~TIMER_BASEMASK; 810 WRITE_ONCE(timer->flags,
811 timer->flags |= base->cpu; 811 (timer->flags & ~TIMER_BASEMASK) | base->cpu);
812 } 812 }
813 } 813 }
814 814
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 02bece4a99ea..eb11011b5292 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -98,6 +98,13 @@ struct ftrace_pid {
98 struct pid *pid; 98 struct pid *pid;
99}; 99};
100 100
101static bool ftrace_pids_enabled(void)
102{
103 return !list_empty(&ftrace_pids);
104}
105
106static void ftrace_update_trampoline(struct ftrace_ops *ops);
107
101/* 108/*
102 * ftrace_disabled is set when an anomaly is discovered. 109 * ftrace_disabled is set when an anomaly is discovered.
103 * ftrace_disabled is much stronger than ftrace_enabled. 110 * ftrace_disabled is much stronger than ftrace_enabled.
@@ -109,7 +116,6 @@ static DEFINE_MUTEX(ftrace_lock);
109static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end; 116static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
110static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end; 117static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
111ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub; 118ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
112ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
113static struct ftrace_ops global_ops; 119static struct ftrace_ops global_ops;
114static struct ftrace_ops control_ops; 120static struct ftrace_ops control_ops;
115 121
@@ -183,14 +189,7 @@ static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
183 if (!test_tsk_trace_trace(current)) 189 if (!test_tsk_trace_trace(current))
184 return; 190 return;
185 191
186 ftrace_pid_function(ip, parent_ip, op, regs); 192 op->saved_func(ip, parent_ip, op, regs);
187}
188
189static void set_ftrace_pid_function(ftrace_func_t func)
190{
191 /* do not set ftrace_pid_function to itself! */
192 if (func != ftrace_pid_func)
193 ftrace_pid_function = func;
194} 193}
195 194
196/** 195/**
@@ -202,7 +201,6 @@ static void set_ftrace_pid_function(ftrace_func_t func)
202void clear_ftrace_function(void) 201void clear_ftrace_function(void)
203{ 202{
204 ftrace_trace_function = ftrace_stub; 203 ftrace_trace_function = ftrace_stub;
205 ftrace_pid_function = ftrace_stub;
206} 204}
207 205
208static void control_ops_disable_all(struct ftrace_ops *ops) 206static void control_ops_disable_all(struct ftrace_ops *ops)
@@ -436,6 +434,12 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
436 } else 434 } else
437 add_ftrace_ops(&ftrace_ops_list, ops); 435 add_ftrace_ops(&ftrace_ops_list, ops);
438 436
437 /* Always save the function, and reset at unregistering */
438 ops->saved_func = ops->func;
439
440 if (ops->flags & FTRACE_OPS_FL_PID && ftrace_pids_enabled())
441 ops->func = ftrace_pid_func;
442
439 ftrace_update_trampoline(ops); 443 ftrace_update_trampoline(ops);
440 444
441 if (ftrace_enabled) 445 if (ftrace_enabled)
@@ -463,15 +467,28 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
463 if (ftrace_enabled) 467 if (ftrace_enabled)
464 update_ftrace_function(); 468 update_ftrace_function();
465 469
470 ops->func = ops->saved_func;
471
466 return 0; 472 return 0;
467} 473}
468 474
469static void ftrace_update_pid_func(void) 475static void ftrace_update_pid_func(void)
470{ 476{
477 bool enabled = ftrace_pids_enabled();
478 struct ftrace_ops *op;
479
471 /* Only do something if we are tracing something */ 480 /* Only do something if we are tracing something */
472 if (ftrace_trace_function == ftrace_stub) 481 if (ftrace_trace_function == ftrace_stub)
473 return; 482 return;
474 483
484 do_for_each_ftrace_op(op, ftrace_ops_list) {
485 if (op->flags & FTRACE_OPS_FL_PID) {
486 op->func = enabled ? ftrace_pid_func :
487 op->saved_func;
488 ftrace_update_trampoline(op);
489 }
490 } while_for_each_ftrace_op(op);
491
475 update_ftrace_function(); 492 update_ftrace_function();
476} 493}
477 494
@@ -1133,7 +1150,8 @@ static struct ftrace_ops global_ops = {
1133 .local_hash.filter_hash = EMPTY_HASH, 1150 .local_hash.filter_hash = EMPTY_HASH,
1134 INIT_OPS_HASH(global_ops) 1151 INIT_OPS_HASH(global_ops)
1135 .flags = FTRACE_OPS_FL_RECURSION_SAFE | 1152 .flags = FTRACE_OPS_FL_RECURSION_SAFE |
1136 FTRACE_OPS_FL_INITIALIZED, 1153 FTRACE_OPS_FL_INITIALIZED |
1154 FTRACE_OPS_FL_PID,
1137}; 1155};
1138 1156
1139/* 1157/*
@@ -5023,7 +5041,9 @@ static void ftrace_update_trampoline(struct ftrace_ops *ops)
5023 5041
5024static struct ftrace_ops global_ops = { 5042static struct ftrace_ops global_ops = {
5025 .func = ftrace_stub, 5043 .func = ftrace_stub,
5026 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED, 5044 .flags = FTRACE_OPS_FL_RECURSION_SAFE |
5045 FTRACE_OPS_FL_INITIALIZED |
5046 FTRACE_OPS_FL_PID,
5027}; 5047};
5028 5048
5029static int __init ftrace_nodyn_init(void) 5049static int __init ftrace_nodyn_init(void)
@@ -5080,11 +5100,6 @@ void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
5080 if (WARN_ON(tr->ops->func != ftrace_stub)) 5100 if (WARN_ON(tr->ops->func != ftrace_stub))
5081 printk("ftrace ops had %pS for function\n", 5101 printk("ftrace ops had %pS for function\n",
5082 tr->ops->func); 5102 tr->ops->func);
5083 /* Only the top level instance does pid tracing */
5084 if (!list_empty(&ftrace_pids)) {
5085 set_ftrace_pid_function(func);
5086 func = ftrace_pid_func;
5087 }
5088 } 5103 }
5089 tr->ops->func = func; 5104 tr->ops->func = func;
5090 tr->ops->private = tr; 5105 tr->ops->private = tr;
@@ -5371,7 +5386,7 @@ static void *fpid_start(struct seq_file *m, loff_t *pos)
5371{ 5386{
5372 mutex_lock(&ftrace_lock); 5387 mutex_lock(&ftrace_lock);
5373 5388
5374 if (list_empty(&ftrace_pids) && (!*pos)) 5389 if (!ftrace_pids_enabled() && (!*pos))
5375 return (void *) 1; 5390 return (void *) 1;
5376 5391
5377 return seq_list_start(&ftrace_pids, *pos); 5392 return seq_list_start(&ftrace_pids, *pos);
@@ -5610,6 +5625,7 @@ static struct ftrace_ops graph_ops = {
5610 .func = ftrace_stub, 5625 .func = ftrace_stub,
5611 .flags = FTRACE_OPS_FL_RECURSION_SAFE | 5626 .flags = FTRACE_OPS_FL_RECURSION_SAFE |
5612 FTRACE_OPS_FL_INITIALIZED | 5627 FTRACE_OPS_FL_INITIALIZED |
5628 FTRACE_OPS_FL_PID |
5613 FTRACE_OPS_FL_STUB, 5629 FTRACE_OPS_FL_STUB,
5614#ifdef FTRACE_GRAPH_TRAMP_ADDR 5630#ifdef FTRACE_GRAPH_TRAMP_ADDR
5615 .trampoline = FTRACE_GRAPH_TRAMP_ADDR, 5631 .trampoline = FTRACE_GRAPH_TRAMP_ADDR,
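With FTRACE_OPS_FL_PID, each ops keeps its original callback in saved_func and has func swapped to ftrace_pid_func while PID filtering is active; the wrapper filters on the current task and then delegates. A compact user-space sketch of that swap-and-delegate pattern, with the PID check simulated by a flag:

#include <stdio.h>
#include <stdbool.h>

struct ops {
	void (*func)(struct ops *op, const char *event);
	void (*saved_func)(struct ops *op, const char *event);
};

static bool pid_filtering = true;
static bool current_task_traced;	/* simulated per-task flag */

static void real_callback(struct ops *op, const char *event)
{
	(void)op;
	printf("traced: %s\n", event);
}

static void pid_wrapper(struct ops *op, const char *event)
{
	if (!current_task_traced)
		return;			/* filtered out */
	op->saved_func(op, event);	/* delegate to the original callback */
}

static void update_pid_func(struct ops *op)
{
	op->func = pid_filtering ? pid_wrapper : op->saved_func;
}

int main(void)
{
	struct ops op = { .func = real_callback, .saved_func = real_callback };

	update_pid_func(&op);
	op.func(&op, "event A");	/* dropped: task not traced */

	current_task_traced = true;
	op.func(&op, "event B");	/* passes the filter */
	return 0;
}
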
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index f060716b02ae..74bde81601a9 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -444,6 +444,7 @@ enum {
444 444
445 TRACE_CONTROL_BIT, 445 TRACE_CONTROL_BIT,
446 446
447 TRACE_BRANCH_BIT,
447/* 448/*
448 * Abuse of the trace_recursion. 449 * Abuse of the trace_recursion.
449 * As we need a way to maintain state if we are tracing the function 450 * As we need a way to maintain state if we are tracing the function
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index a87b43f49eb4..e2e12ad3186f 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -36,9 +36,12 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
36 struct trace_branch *entry; 36 struct trace_branch *entry;
37 struct ring_buffer *buffer; 37 struct ring_buffer *buffer;
38 unsigned long flags; 38 unsigned long flags;
39 int cpu, pc; 39 int pc;
40 const char *p; 40 const char *p;
41 41
42 if (current->trace_recursion & TRACE_BRANCH_BIT)
43 return;
44
42 /* 45 /*
43 * I would love to save just the ftrace_likely_data pointer, but 46 * I would love to save just the ftrace_likely_data pointer, but
44 * this code can also be used by modules. Ugly things can happen 47 * this code can also be used by modules. Ugly things can happen
@@ -49,10 +52,10 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
49 if (unlikely(!tr)) 52 if (unlikely(!tr))
50 return; 53 return;
51 54
52 local_irq_save(flags); 55 raw_local_irq_save(flags);
53 cpu = raw_smp_processor_id(); 56 current->trace_recursion |= TRACE_BRANCH_BIT;
54 data = per_cpu_ptr(tr->trace_buffer.data, cpu); 57 data = this_cpu_ptr(tr->trace_buffer.data);
55 if (atomic_inc_return(&data->disabled) != 1) 58 if (atomic_read(&data->disabled))
56 goto out; 59 goto out;
57 60
58 pc = preempt_count(); 61 pc = preempt_count();
@@ -81,8 +84,8 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
81 __buffer_unlock_commit(buffer, event); 84 __buffer_unlock_commit(buffer, event);
82 85
83 out: 86 out:
84 atomic_dec(&data->disabled); 87 current->trace_recursion &= ~TRACE_BRANCH_BIT;
85 local_irq_restore(flags); 88 raw_local_irq_restore(flags);
86} 89}
87 90
88static inline 91static inline
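The branch probe now refuses to re-enter itself by testing and setting a bit in trace_recursion, which is what lets the per-cpu disabled counter become a plain read. A minimal single-threaded illustration of that guard-bit pattern:

#include <stdio.h>

#define BRANCH_BIT (1u << 0)

static unsigned int recursion;	/* stands in for current->trace_recursion */

static void probe(const char *what, int depth)
{
	if (recursion & BRANCH_BIT)
		return;			/* already inside the probe: bail out */

	recursion |= BRANCH_BIT;
	printf("recording branch: %s\n", what);

	/* anything reached from here that re-enters probe() is a no-op */
	if (depth == 0)
		probe("nested event", 1);

	recursion &= ~BRANCH_BIT;
}

int main(void)
{
	probe("outer event", 0);
	return 0;
}
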
diff --git a/lib/decompress.c b/lib/decompress.c
index 528ff932d8e4..62696dff5730 100644
--- a/lib/decompress.c
+++ b/lib/decompress.c
@@ -59,8 +59,11 @@ decompress_fn __init decompress_method(const unsigned char *inbuf, long len,
59{ 59{
60 const struct compress_format *cf; 60 const struct compress_format *cf;
61 61
62 if (len < 2) 62 if (len < 2) {
63 if (name)
64 *name = NULL;
63 return NULL; /* Need at least this much... */ 65 return NULL; /* Need at least this much... */
66 }
64 67
65 pr_debug("Compressed data magic: %#.2x %#.2x\n", inbuf[0], inbuf[1]); 68 pr_debug("Compressed data magic: %#.2x %#.2x\n", inbuf[0], inbuf[1]);
66 69
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index ae4b65e17e64..dace71fe41f7 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -574,6 +574,9 @@ void debug_dma_assert_idle(struct page *page)
574 unsigned long flags; 574 unsigned long flags;
575 phys_addr_t cln; 575 phys_addr_t cln;
576 576
577 if (dma_debug_disabled())
578 return;
579
577 if (!page) 580 if (!page)
578 return; 581 return;
579 582
diff --git a/lib/hexdump.c b/lib/hexdump.c
index 7ea09699855d..8d74c20d8595 100644
--- a/lib/hexdump.c
+++ b/lib/hexdump.c
@@ -11,6 +11,7 @@
11#include <linux/ctype.h> 11#include <linux/ctype.h>
12#include <linux/kernel.h> 12#include <linux/kernel.h>
13#include <linux/export.h> 13#include <linux/export.h>
14#include <asm/unaligned.h>
14 15
15const char hex_asc[] = "0123456789abcdef"; 16const char hex_asc[] = "0123456789abcdef";
16EXPORT_SYMBOL(hex_asc); 17EXPORT_SYMBOL(hex_asc);
@@ -139,7 +140,7 @@ int hex_dump_to_buffer(const void *buf, size_t len, int rowsize, int groupsize,
139 for (j = 0; j < ngroups; j++) { 140 for (j = 0; j < ngroups; j++) {
140 ret = snprintf(linebuf + lx, linebuflen - lx, 141 ret = snprintf(linebuf + lx, linebuflen - lx,
141 "%s%16.16llx", j ? " " : "", 142 "%s%16.16llx", j ? " " : "",
142 (unsigned long long)*(ptr8 + j)); 143 get_unaligned(ptr8 + j));
143 if (ret >= linebuflen - lx) 144 if (ret >= linebuflen - lx)
144 goto overflow1; 145 goto overflow1;
145 lx += ret; 146 lx += ret;
@@ -150,7 +151,7 @@ int hex_dump_to_buffer(const void *buf, size_t len, int rowsize, int groupsize,
150 for (j = 0; j < ngroups; j++) { 151 for (j = 0; j < ngroups; j++) {
151 ret = snprintf(linebuf + lx, linebuflen - lx, 152 ret = snprintf(linebuf + lx, linebuflen - lx,
152 "%s%8.8x", j ? " " : "", 153 "%s%8.8x", j ? " " : "",
153 *(ptr4 + j)); 154 get_unaligned(ptr4 + j));
154 if (ret >= linebuflen - lx) 155 if (ret >= linebuflen - lx)
155 goto overflow1; 156 goto overflow1;
156 lx += ret; 157 lx += ret;
@@ -161,7 +162,7 @@ int hex_dump_to_buffer(const void *buf, size_t len, int rowsize, int groupsize,
161 for (j = 0; j < ngroups; j++) { 162 for (j = 0; j < ngroups; j++) {
162 ret = snprintf(linebuf + lx, linebuflen - lx, 163 ret = snprintf(linebuf + lx, linebuflen - lx,
163 "%s%4.4x", j ? " " : "", 164 "%s%4.4x", j ? " " : "",
164 *(ptr2 + j)); 165 get_unaligned(ptr2 + j));
165 if (ret >= linebuflen - lx) 166 if (ret >= linebuflen - lx)
166 goto overflow1; 167 goto overflow1;
167 lx += ret; 168 lx += ret;
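The 2-, 4- and 8-byte group dumps dereference pointers that may not be naturally aligned, so the reads go through get_unaligned(). A user-space analogue using memcpy, which compilers lower to whatever access the target permits:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static uint32_t get_unaligned_u32(const void *p)
{
	uint32_t v;

	memcpy(&v, p, sizeof(v));	/* never assumes 4-byte alignment */
	return v;
}

int main(void)
{
	unsigned char buf[16];

	for (unsigned int i = 0; i < sizeof(buf); i++)
		buf[i] = (unsigned char)i;

	/* read a 32-bit group starting at an odd (unaligned) offset */
	printf("%8.8x\n", get_unaligned_u32(buf + 1));
	return 0;
}
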
diff --git a/lib/iommu-common.c b/lib/iommu-common.c
index df30632f0bef..ff19f66d3f7f 100644
--- a/lib/iommu-common.c
+++ b/lib/iommu-common.c
@@ -119,7 +119,7 @@ unsigned long iommu_tbl_range_alloc(struct device *dev,
119 unsigned long align_mask = 0; 119 unsigned long align_mask = 0;
120 120
121 if (align_order > 0) 121 if (align_order > 0)
122 align_mask = 0xffffffffffffffffl >> (64 - align_order); 122 align_mask = ~0ul >> (BITS_PER_LONG - align_order);
123 123
124 /* Sanity check */ 124 /* Sanity check */
125 if (unlikely(npages == 0)) { 125 if (unlikely(npages == 0)) {
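The old literal assumed a 64-bit unsigned long; deriving the mask from BITS_PER_LONG keeps exactly the low align_order bits set on 32-bit builds as well (the align_order > 0 guard matters, since shifting by the full word width is undefined). A quick demonstration of the expression:

#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

int main(void)
{
	/* the kernel code only does this for align_order > 0 */
	for (unsigned int align_order = 1; align_order <= 4; align_order++) {
		unsigned long align_mask = ~0ul >> (BITS_PER_LONG - align_order);

		printf("align_order %u -> mask 0x%lx\n", align_order, align_mask);
	}
	return 0;
}
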
diff --git a/lib/kobject.c b/lib/kobject.c
index 2e3bd01964a9..3e3a5c3cb330 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -337,8 +337,9 @@ error:
337} 337}
338EXPORT_SYMBOL(kobject_init); 338EXPORT_SYMBOL(kobject_init);
339 339
340static int kobject_add_varg(struct kobject *kobj, struct kobject *parent, 340static __printf(3, 0) int kobject_add_varg(struct kobject *kobj,
341 const char *fmt, va_list vargs) 341 struct kobject *parent,
342 const char *fmt, va_list vargs)
342{ 343{
343 int retval; 344 int retval;
344 345
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index a60a6d335a91..cc0c69710dcf 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -610,6 +610,8 @@ next:
610 iter->skip = 0; 610 iter->skip = 0;
611 } 611 }
612 612
613 iter->p = NULL;
614
613 /* Ensure we see any new tables. */ 615 /* Ensure we see any new tables. */
614 smp_rmb(); 616 smp_rmb();
615 617
@@ -620,8 +622,6 @@ next:
620 return ERR_PTR(-EAGAIN); 622 return ERR_PTR(-EAGAIN);
621 } 623 }
622 624
623 iter->p = NULL;
624
625 return NULL; 625 return NULL;
626} 626}
627EXPORT_SYMBOL_GPL(rhashtable_walk_next); 627EXPORT_SYMBOL_GPL(rhashtable_walk_next);
diff --git a/mm/cma.h b/mm/cma.h
index 1132d733556d..17c75a4246c8 100644
--- a/mm/cma.h
+++ b/mm/cma.h
@@ -16,7 +16,7 @@ struct cma {
16extern struct cma cma_areas[MAX_CMA_AREAS]; 16extern struct cma cma_areas[MAX_CMA_AREAS];
17extern unsigned cma_area_count; 17extern unsigned cma_area_count;
18 18
19static unsigned long cma_bitmap_maxno(struct cma *cma) 19static inline unsigned long cma_bitmap_maxno(struct cma *cma)
20{ 20{
21 return cma->count >> cma->order_per_bit; 21 return cma->count >> cma->order_per_bit;
22} 22}
diff --git a/mm/cma_debug.c b/mm/cma_debug.c
index 7621ee34daa0..f8e4b60db167 100644
--- a/mm/cma_debug.c
+++ b/mm/cma_debug.c
@@ -39,7 +39,7 @@ static int cma_used_get(void *data, u64 *val)
39 39
40 mutex_lock(&cma->lock); 40 mutex_lock(&cma->lock);
41 /* pages counter is smaller than sizeof(int) */ 41 /* pages counter is smaller than sizeof(int) */
42 used = bitmap_weight(cma->bitmap, (int)cma->count); 42 used = bitmap_weight(cma->bitmap, (int)cma_bitmap_maxno(cma));
43 mutex_unlock(&cma->lock); 43 mutex_unlock(&cma->lock);
44 *val = (u64)used << cma->order_per_bit; 44 *val = (u64)used << cma->order_per_bit;
45 45
@@ -52,13 +52,14 @@ static int cma_maxchunk_get(void *data, u64 *val)
52 struct cma *cma = data; 52 struct cma *cma = data;
53 unsigned long maxchunk = 0; 53 unsigned long maxchunk = 0;
54 unsigned long start, end = 0; 54 unsigned long start, end = 0;
55 unsigned long bitmap_maxno = cma_bitmap_maxno(cma);
55 56
56 mutex_lock(&cma->lock); 57 mutex_lock(&cma->lock);
57 for (;;) { 58 for (;;) {
58 start = find_next_zero_bit(cma->bitmap, cma->count, end); 59 start = find_next_zero_bit(cma->bitmap, bitmap_maxno, end);
59 if (start >= cma->count) 60 if (start >= cma->count)
60 break; 61 break;
61 end = find_next_bit(cma->bitmap, cma->count, start); 62 end = find_next_bit(cma->bitmap, bitmap_maxno, start);
62 maxchunk = max(end - start, maxchunk); 63 maxchunk = max(end - start, maxchunk);
63 } 64 }
64 mutex_unlock(&cma->lock); 65 mutex_unlock(&cma->lock);
@@ -170,10 +171,10 @@ static void cma_debugfs_add_one(struct cma *cma, int idx)
170 171
171 tmp = debugfs_create_dir(name, cma_debugfs_root); 172 tmp = debugfs_create_dir(name, cma_debugfs_root);
172 173
173 debugfs_create_file("alloc", S_IWUSR, cma_debugfs_root, cma, 174 debugfs_create_file("alloc", S_IWUSR, tmp, cma,
174 &cma_alloc_fops); 175 &cma_alloc_fops);
175 176
176 debugfs_create_file("free", S_IWUSR, cma_debugfs_root, cma, 177 debugfs_create_file("free", S_IWUSR, tmp, cma,
177 &cma_free_fops); 178 &cma_free_fops);
178 179
179 debugfs_create_file("base_pfn", S_IRUGO, tmp, 180 debugfs_create_file("base_pfn", S_IRUGO, tmp,
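Each CMA bitmap bit covers 2^order_per_bit pages, so both scans must run over count >> order_per_bit bits and the resulting run length must be scaled back up. A small stand-alone version of the corrected max-free-chunk scan over a made-up bitmap:

#include <stdio.h>

int main(void)
{
	/* one bit per 2^order_per_bit pages; 0 = free, 1 = allocated */
	const unsigned int order_per_bit = 2;			/* 4 pages per bit */
	const unsigned int count = 32;				/* pages in the area */
	const unsigned int maxno = count >> order_per_bit;	/* 8 bitmap bits */
	const unsigned char bitmap[8] = { 1, 0, 0, 1, 0, 0, 0, 1 };

	unsigned int maxchunk = 0, run = 0;

	for (unsigned int i = 0; i < maxno; i++) {
		if (!bitmap[i]) {
			run++;
			if (run > maxchunk)
				maxchunk = run;
		} else {
			run = 0;
		}
	}

	printf("largest free chunk: %u pages\n", maxchunk << order_per_bit);
	return 0;
}
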
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index c107094f79ba..097c7a4bfbd9 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1676,12 +1676,7 @@ static void __split_huge_page_refcount(struct page *page,
1676 /* after clearing PageTail the gup refcount can be released */ 1676 /* after clearing PageTail the gup refcount can be released */
1677 smp_mb__after_atomic(); 1677 smp_mb__after_atomic();
1678 1678
1679 /* 1679 page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
1680 * retain hwpoison flag of the poisoned tail page:
1681 * fix for the unsuitable process killed on Guest Machine(KVM)
1682 * by the memory-failure.
1683 */
1684 page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP | __PG_HWPOISON;
1685 page_tail->flags |= (page->flags & 1680 page_tail->flags |= (page->flags &
1686 ((1L << PG_referenced) | 1681 ((1L << PG_referenced) |
1687 (1L << PG_swapbacked) | 1682 (1L << PG_swapbacked) |
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 6c513a63ea84..7b28e9cdf1c7 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -2,7 +2,7 @@
2 * This file contains shadow memory manipulation code. 2 * This file contains shadow memory manipulation code.
3 * 3 *
4 * Copyright (c) 2014 Samsung Electronics Co., Ltd. 4 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
5 * Author: Andrey Ryabinin <a.ryabinin@samsung.com> 5 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
6 * 6 *
7 * Some of code borrowed from https://github.com/xairy/linux by 7 * Some of code borrowed from https://github.com/xairy/linux by
8 * Andrey Konovalov <adech.fo@gmail.com> 8 * Andrey Konovalov <adech.fo@gmail.com>
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index 680ceedf810a..e07c94fbd0ac 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -2,7 +2,7 @@
2 * This file contains error reporting code. 2 * This file contains error reporting code.
3 * 3 *
4 * Copyright (c) 2014 Samsung Electronics Co., Ltd. 4 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
5 * Author: Andrey Ryabinin <a.ryabinin@samsung.com> 5 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
6 * 6 *
7 * Some of code borrowed from https://github.com/xairy/linux by 7 * Some of code borrowed from https://github.com/xairy/linux by
8 * Andrey Konovalov <adech.fo@gmail.com> 8 * Andrey Konovalov <adech.fo@gmail.com>
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index c53543d89282..1f4446a90cef 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -909,6 +909,18 @@ int get_hwpoison_page(struct page *page)
909 * directly for tail pages. 909 * directly for tail pages.
910 */ 910 */
911 if (PageTransHuge(head)) { 911 if (PageTransHuge(head)) {
912 /*
913 * Non anonymous thp exists only in allocation/free time. We
914 * can't handle such a case correctly, so let's give it up.
915 * This should be better than triggering BUG_ON when kernel
916 * tries to touch the "partially handled" page.
917 */
918 if (!PageAnon(head)) {
919 pr_err("MCE: %#lx: non anonymous thp\n",
920 page_to_pfn(page));
921 return 0;
922 }
923
912 if (get_page_unless_zero(head)) { 924 if (get_page_unless_zero(head)) {
913 if (PageTail(page)) 925 if (PageTail(page))
914 get_page(page); 926 get_page(page);
@@ -1134,17 +1146,11 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
1134 } 1146 }
1135 1147
1136 if (!PageHuge(p) && PageTransHuge(hpage)) { 1148 if (!PageHuge(p) && PageTransHuge(hpage)) {
1137 if (!PageAnon(hpage)) { 1149 if (!PageAnon(hpage) || unlikely(split_huge_page(hpage))) {
1138 pr_err("MCE: %#lx: non anonymous thp\n", pfn); 1150 if (!PageAnon(hpage))
1139 if (TestClearPageHWPoison(p)) 1151 pr_err("MCE: %#lx: non anonymous thp\n", pfn);
1140 atomic_long_sub(nr_pages, &num_poisoned_pages); 1152 else
1141 put_page(p); 1153 pr_err("MCE: %#lx: thp split failed\n", pfn);
1142 if (p != hpage)
1143 put_page(hpage);
1144 return -EBUSY;
1145 }
1146 if (unlikely(split_huge_page(hpage))) {
1147 pr_err("MCE: %#lx: thp split failed\n", pfn);
1148 if (TestClearPageHWPoison(p)) 1154 if (TestClearPageHWPoison(p))
1149 atomic_long_sub(nr_pages, &num_poisoned_pages); 1155 atomic_long_sub(nr_pages, &num_poisoned_pages);
1150 put_page(p); 1156 put_page(p);
@@ -1209,9 +1215,9 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
1209 if (!PageHWPoison(p)) { 1215 if (!PageHWPoison(p)) {
1210 printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn); 1216 printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
1211 atomic_long_sub(nr_pages, &num_poisoned_pages); 1217 atomic_long_sub(nr_pages, &num_poisoned_pages);
1218 unlock_page(hpage);
1212 put_page(hpage); 1219 put_page(hpage);
1213 res = 0; 1220 return 0;
1214 goto out;
1215 } 1221 }
1216 if (hwpoison_filter(p)) { 1222 if (hwpoison_filter(p)) {
1217 if (TestClearPageHWPoison(p)) 1223 if (TestClearPageHWPoison(p))
@@ -1535,6 +1541,8 @@ static int get_any_page(struct page *page, unsigned long pfn, int flags)
1535 */ 1541 */
1536 ret = __get_any_page(page, pfn, 0); 1542 ret = __get_any_page(page, pfn, 0);
1537 if (!PageLRU(page)) { 1543 if (!PageLRU(page)) {
1544 /* Drop page reference which is from __get_any_page() */
1545 put_page(page);
1538 pr_info("soft_offline: %#lx: unknown non LRU page type %lx\n", 1546 pr_info("soft_offline: %#lx: unknown non LRU page type %lx\n",
1539 pfn, page->flags); 1547 pfn, page->flags);
1540 return -EIO; 1548 return -EIO;
@@ -1564,13 +1572,12 @@ static int soft_offline_huge_page(struct page *page, int flags)
1564 unlock_page(hpage); 1572 unlock_page(hpage);
1565 1573
1566 ret = isolate_huge_page(hpage, &pagelist); 1574 ret = isolate_huge_page(hpage, &pagelist);
1567 if (ret) { 1575 /*
1568 /* 1576 * get_any_page() and isolate_huge_page() takes a refcount each,
1569 * get_any_page() and isolate_huge_page() takes a refcount each, 1577 * so need to drop one here.
1570 * so need to drop one here. 1578 */
1571 */ 1579 put_page(hpage);
1572 put_page(hpage); 1580 if (!ret) {
1573 } else {
1574 pr_info("soft offline: %#lx hugepage failed to isolate\n", pfn); 1581 pr_info("soft offline: %#lx hugepage failed to isolate\n", pfn);
1575 return -EBUSY; 1582 return -EBUSY;
1576 } 1583 }
@@ -1656,6 +1663,8 @@ static int __soft_offline_page(struct page *page, int flags)
1656 inc_zone_page_state(page, NR_ISOLATED_ANON + 1663 inc_zone_page_state(page, NR_ISOLATED_ANON +
1657 page_is_file_cache(page)); 1664 page_is_file_cache(page));
1658 list_add(&page->lru, &pagelist); 1665 list_add(&page->lru, &pagelist);
1666 if (!TestSetPageHWPoison(page))
1667 atomic_long_inc(&num_poisoned_pages);
1659 ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL, 1668 ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL,
1660 MIGRATE_SYNC, MR_MEMORY_FAILURE); 1669 MIGRATE_SYNC, MR_MEMORY_FAILURE);
1661 if (ret) { 1670 if (ret) {
@@ -1670,9 +1679,8 @@ static int __soft_offline_page(struct page *page, int flags)
1670 pfn, ret, page->flags); 1679 pfn, ret, page->flags);
1671 if (ret > 0) 1680 if (ret > 0)
1672 ret = -EIO; 1681 ret = -EIO;
1673 } else { 1682 if (TestClearPageHWPoison(page))
1674 SetPageHWPoison(page); 1683 atomic_long_dec(&num_poisoned_pages);
1675 atomic_long_inc(&num_poisoned_pages);
1676 } 1684 }
1677 } else { 1685 } else {
1678 pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n", 1686 pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
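
The __soft_offline_page() hunks above move the HWPoison marking ahead of migrate_pages() and clear it again, together with the counter, if migration fails, so the flag and num_poisoned_pages never drift apart. A minimal userspace sketch of that mark-then-roll-back bookkeeping using C11 atomics; try_migrate() and the struct layout are invented for illustration, not kernel API:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_long num_poisoned;                /* analog of num_poisoned_pages */

struct page { atomic_bool hwpoison; };

/* Hypothetical migration step; returns 0 on success. */
static int try_migrate(struct page *p) { (void)p; return 0; }

static int soft_offline(struct page *p)
{
    /* mark first, so any concurrent observer already sees the page poisoned */
    if (!atomic_exchange(&p->hwpoison, true))
        atomic_fetch_add(&num_poisoned, 1);

    int ret = try_migrate(p);
    if (ret) {
        /* roll back: clear the flag and the counter together */
        if (atomic_exchange(&p->hwpoison, false))
            atomic_fetch_sub(&num_poisoned, 1);
    }
    return ret;
}

int main(void)
{
    struct page pg;

    atomic_init(&pg.hwpoison, false);
    printf("soft_offline -> %d, poisoned=%ld\n",
           soft_offline(&pg), atomic_load(&num_poisoned));
    return 0;
}
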
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 26fbba7d888f..6da82bcb0a8b 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -446,7 +446,7 @@ static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn)
446 int nr_pages = PAGES_PER_SECTION; 446 int nr_pages = PAGES_PER_SECTION;
447 int nid = pgdat->node_id; 447 int nid = pgdat->node_id;
448 int zone_type; 448 int zone_type;
449 unsigned long flags; 449 unsigned long flags, pfn;
450 int ret; 450 int ret;
451 451
452 zone_type = zone - pgdat->node_zones; 452 zone_type = zone - pgdat->node_zones;
@@ -461,6 +461,14 @@ static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn)
461 pgdat_resize_unlock(zone->zone_pgdat, &flags); 461 pgdat_resize_unlock(zone->zone_pgdat, &flags);
462 memmap_init_zone(nr_pages, nid, zone_type, 462 memmap_init_zone(nr_pages, nid, zone_type,
463 phys_start_pfn, MEMMAP_HOTPLUG); 463 phys_start_pfn, MEMMAP_HOTPLUG);
464
465 /* online_page_range is called later and expects pages reserved */
466 for (pfn = phys_start_pfn; pfn < phys_start_pfn + nr_pages; pfn++) {
467 if (!pfn_valid(pfn))
468 continue;
469
470 SetPageReserved(pfn_to_page(pfn));
471 }
464 return 0; 472 return 0;
465} 473}
466 474
@@ -1269,6 +1277,7 @@ int __ref add_memory(int nid, u64 start, u64 size)
1269 1277
1270 /* create new memmap entry */ 1278 /* create new memmap entry */
1271 firmware_map_add_hotplug(start, start + size, "System RAM"); 1279 firmware_map_add_hotplug(start, start + size, "System RAM");
1280 memblock_add_node(start, size, nid);
1272 1281
1273 goto out; 1282 goto out;
1274 1283
@@ -2005,6 +2014,8 @@ void __ref remove_memory(int nid, u64 start, u64 size)
2005 2014
2006 /* remove memmap entry */ 2015 /* remove memmap entry */
2007 firmware_map_remove(start, start + size, "System RAM"); 2016 firmware_map_remove(start, start + size, "System RAM");
2017 memblock_free(start, size);
2018 memblock_remove(start, size);
2008 2019
2009 arch_remove_memory(start, size); 2020 arch_remove_memory(start, size);
2010 2021
diff --git a/mm/migrate.c b/mm/migrate.c
index ee401e4e5ef1..eb4267107d1f 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -880,7 +880,8 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
880 /* Establish migration ptes or remove ptes */ 880 /* Establish migration ptes or remove ptes */
881 if (page_mapped(page)) { 881 if (page_mapped(page)) {
882 try_to_unmap(page, 882 try_to_unmap(page,
883 TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS); 883 TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS|
884 TTU_IGNORE_HWPOISON);
884 page_was_mapped = 1; 885 page_was_mapped = 1;
885 } 886 }
886 887
@@ -950,7 +951,10 @@ out:
950 list_del(&page->lru); 951 list_del(&page->lru);
951 dec_zone_page_state(page, NR_ISOLATED_ANON + 952 dec_zone_page_state(page, NR_ISOLATED_ANON +
952 page_is_file_cache(page)); 953 page_is_file_cache(page));
953 if (reason != MR_MEMORY_FAILURE) 954 /* Soft-offlined page shouldn't go through lru cache list */
955 if (reason == MR_MEMORY_FAILURE)
956 put_page(page);
957 else
954 putback_lru_page(page); 958 putback_lru_page(page);
955 } 959 }
956 960
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 22cddd3e5de8..5cccc127ef81 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2063,10 +2063,10 @@ static struct notifier_block ratelimit_nb = {
2063 */ 2063 */
2064void __init page_writeback_init(void) 2064void __init page_writeback_init(void)
2065{ 2065{
2066 BUG_ON(wb_domain_init(&global_wb_domain, GFP_KERNEL));
2067
2066 writeback_set_ratelimit(); 2068 writeback_set_ratelimit();
2067 register_cpu_notifier(&ratelimit_nb); 2069 register_cpu_notifier(&ratelimit_nb);
2068
2069 BUG_ON(wb_domain_init(&global_wb_domain, GFP_KERNEL));
2070} 2070}
2071 2071
2072/** 2072/**
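
The page_writeback_init() hunk above only reorders the setup: global_wb_domain is initialised before register_cpu_notifier(), so the notifier callback can never run against half-built state. The same ordering rule in a small userspace sketch; the domain struct and callback are made up for the example:

#include <pthread.h>
#include <stdio.h>

struct domain { long ratelimit; };          /* stand-in for global_wb_domain */

static struct domain global_domain;

/* The callback reads global_domain, so it must only become reachable
 * after the domain is fully initialised. */
static void *cpu_callback(void *arg)
{
    (void)arg;
    printf("callback sees ratelimit=%ld\n", global_domain.ratelimit);
    return NULL;
}

int main(void)
{
    pthread_t cb;

    /* 1) initialise the state the callback depends on ... */
    global_domain.ratelimit = 32;

    /* 2) ... only then register/start the consumer of that state. */
    pthread_create(&cb, NULL, cpu_callback, NULL);
    pthread_join(cb, NULL);
    return 0;
}
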
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 506eac8b38af..5b5240b7f642 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -18,7 +18,6 @@
18#include <linux/mm.h> 18#include <linux/mm.h>
19#include <linux/swap.h> 19#include <linux/swap.h>
20#include <linux/interrupt.h> 20#include <linux/interrupt.h>
21#include <linux/rwsem.h>
22#include <linux/pagemap.h> 21#include <linux/pagemap.h>
23#include <linux/jiffies.h> 22#include <linux/jiffies.h>
24#include <linux/bootmem.h> 23#include <linux/bootmem.h>
@@ -246,9 +245,7 @@ static inline void reset_deferred_meminit(pg_data_t *pgdat)
246/* Returns true if the struct page for the pfn is uninitialised */ 245/* Returns true if the struct page for the pfn is uninitialised */
247static inline bool __meminit early_page_uninitialised(unsigned long pfn) 246static inline bool __meminit early_page_uninitialised(unsigned long pfn)
248{ 247{
249 int nid = early_pfn_to_nid(pfn); 248 if (pfn >= NODE_DATA(early_pfn_to_nid(pfn))->first_deferred_pfn)
250
251 if (pfn >= NODE_DATA(nid)->first_deferred_pfn)
252 return true; 249 return true;
253 250
254 return false; 251 return false;
@@ -983,21 +980,21 @@ static void __init __free_pages_boot_core(struct page *page,
983 980
984#if defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) || \ 981#if defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) || \
985 defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) 982 defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
986/* Only safe to use early in boot when initialisation is single-threaded */ 983
987static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata; 984static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;
988 985
989int __meminit early_pfn_to_nid(unsigned long pfn) 986int __meminit early_pfn_to_nid(unsigned long pfn)
990{ 987{
988 static DEFINE_SPINLOCK(early_pfn_lock);
991 int nid; 989 int nid;
992 990
993 /* The system will behave unpredictably otherwise */ 991 spin_lock(&early_pfn_lock);
994 BUG_ON(system_state != SYSTEM_BOOTING);
995
996 nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache); 992 nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
997 if (nid >= 0) 993 if (nid < 0)
998 return nid; 994 nid = 0;
999 /* just returns 0 */ 995 spin_unlock(&early_pfn_lock);
1000 return 0; 996
997 return nid;
1001} 998}
1002#endif 999#endif
1003 1000
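
The early_pfn_to_nid() hunk above drops the BUG_ON that restricted it to single-threaded boot and instead serialises the shared pfn-to-node cache with a local spinlock, falling back to node 0 when the lookup finds nothing. A userspace sketch of the same lock-protected memoised lookup, assuming an invented slow path in place of __early_pfn_to_nid():

#include <pthread.h>
#include <stdio.h>

/* One-entry cache, analog of mminit_pfnnid_cache (names are illustrative). */
struct pfn_cache { unsigned long start, end; int nid; };

static struct pfn_cache cache = { 1, 0, -1 };       /* empty: start > end */
static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;

/* Hypothetical slow path standing in for __early_pfn_to_nid(). */
static int slow_lookup(unsigned long pfn, struct pfn_cache *c)
{
    c->start = pfn & ~0xffUL;
    c->end   = c->start + 0x100;
    c->nid   = (int)(pfn >> 20) & 1;
    return c->nid;
}

/* Safe to call from several threads: the shared cache is only read or
 * written while holding the lock, mirroring early_pfn_lock above. */
static int pfn_to_nid(unsigned long pfn)
{
    int nid;

    pthread_mutex_lock(&cache_lock);
    if (pfn >= cache.start && pfn < cache.end)
        nid = cache.nid;
    else
        nid = slow_lookup(pfn, &cache);
    if (nid < 0)
        nid = 0;                        /* same fallback as the patch */
    pthread_mutex_unlock(&cache_lock);
    return nid;
}

int main(void)
{
    printf("pfn 0x12345 -> node %d\n", pfn_to_nid(0x12345));
    return 0;
}
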
@@ -1062,7 +1059,15 @@ static void __init deferred_free_range(struct page *page,
1062 __free_pages_boot_core(page, pfn, 0); 1059 __free_pages_boot_core(page, pfn, 0);
1063} 1060}
1064 1061
1065static __initdata DECLARE_RWSEM(pgdat_init_rwsem); 1062/* Completion tracking for deferred_init_memmap() threads */
1063static atomic_t pgdat_init_n_undone __initdata;
1064static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);
1065
1066static inline void __init pgdat_init_report_one_done(void)
1067{
1068 if (atomic_dec_and_test(&pgdat_init_n_undone))
1069 complete(&pgdat_init_all_done_comp);
1070}
1066 1071
1067/* Initialise remaining memory on a node */ 1072/* Initialise remaining memory on a node */
1068static int __init deferred_init_memmap(void *data) 1073static int __init deferred_init_memmap(void *data)
@@ -1079,7 +1084,7 @@ static int __init deferred_init_memmap(void *data)
1079 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); 1084 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
1080 1085
1081 if (first_init_pfn == ULONG_MAX) { 1086 if (first_init_pfn == ULONG_MAX) {
1082 up_read(&pgdat_init_rwsem); 1087 pgdat_init_report_one_done();
1083 return 0; 1088 return 0;
1084 } 1089 }
1085 1090
@@ -1179,7 +1184,8 @@ free_range:
1179 1184
1180 pr_info("node %d initialised, %lu pages in %ums\n", nid, nr_pages, 1185 pr_info("node %d initialised, %lu pages in %ums\n", nid, nr_pages,
1181 jiffies_to_msecs(jiffies - start)); 1186 jiffies_to_msecs(jiffies - start));
1182 up_read(&pgdat_init_rwsem); 1187
1188 pgdat_init_report_one_done();
1183 return 0; 1189 return 0;
1184} 1190}
1185 1191
@@ -1187,14 +1193,17 @@ void __init page_alloc_init_late(void)
1187{ 1193{
1188 int nid; 1194 int nid;
1189 1195
1196 /* There will be num_node_state(N_MEMORY) threads */
1197 atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
1190 for_each_node_state(nid, N_MEMORY) { 1198 for_each_node_state(nid, N_MEMORY) {
1191 down_read(&pgdat_init_rwsem);
1192 kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid); 1199 kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
1193 } 1200 }
1194 1201
1195 /* Block until all are initialised */ 1202 /* Block until all are initialised */
1196 down_write(&pgdat_init_rwsem); 1203 wait_for_completion(&pgdat_init_all_done_comp);
1197 up_write(&pgdat_init_rwsem); 1204
1205 /* Reinit limits that are based on free pages after the kernel is up */
1206 files_maxfiles_init();
1198} 1207}
1199#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ 1208#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
1200 1209
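
The deferred_init_memmap() hunks replace the pgdat_init_rwsem trick with an atomic count of outstanding nodes plus a completion: every init thread calls pgdat_init_report_one_done() when it finishes, and page_alloc_init_late() simply waits for the completion. A userspace analog of that countdown, built from a mutex and a condition variable; the node count and thread body are invented:

#include <pthread.h>
#include <stdio.h>

static int n_undone;                    /* analog of pgdat_init_n_undone */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t all_done = PTHREAD_COND_INITIALIZER;

static void report_one_done(void)
{
    pthread_mutex_lock(&lock);
    if (--n_undone == 0)
        pthread_cond_signal(&all_done); /* complete() */
    pthread_mutex_unlock(&lock);
}

static void *init_node(void *arg)
{
    printf("node %ld initialised\n", (long)arg);
    report_one_done();                  /* always called, even on early exit */
    return NULL;
}

int main(void)
{
    enum { NODES = 4 };
    pthread_t t[NODES];

    n_undone = NODES;                   /* atomic_set() equivalent */
    for (long i = 0; i < NODES; i++)
        pthread_create(&t[i], NULL, init_node, (void *)i);

    /* wait_for_completion() equivalent */
    pthread_mutex_lock(&lock);
    while (n_undone)
        pthread_cond_wait(&all_done, &lock);
    pthread_mutex_unlock(&lock);

    for (int i = 0; i < NODES; i++)
        pthread_join(t[i], NULL);
    puts("all nodes initialised");
    return 0;
}

Part of the point of the switch is that no task ends up releasing a semaphore another task acquired; the waiter only ever blocks on the completion.
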
@@ -1287,6 +1296,10 @@ static inline int check_new_page(struct page *page)
1287 bad_reason = "non-NULL mapping"; 1296 bad_reason = "non-NULL mapping";
1288 if (unlikely(atomic_read(&page->_count) != 0)) 1297 if (unlikely(atomic_read(&page->_count) != 0))
1289 bad_reason = "nonzero _count"; 1298 bad_reason = "nonzero _count";
1299 if (unlikely(page->flags & __PG_HWPOISON)) {
1300 bad_reason = "HWPoisoned (hardware-corrupted)";
1301 bad_flags = __PG_HWPOISON;
1302 }
1290 if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) { 1303 if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) {
1291 bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set"; 1304 bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set";
1292 bad_flags = PAGE_FLAGS_CHECK_AT_PREP; 1305 bad_flags = PAGE_FLAGS_CHECK_AT_PREP;
@@ -1330,12 +1343,15 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
1330 set_page_owner(page, order, gfp_flags); 1343 set_page_owner(page, order, gfp_flags);
1331 1344
1332 /* 1345 /*
1333 * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was necessary to 1346 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
1334 * allocate the page. The expectation is that the caller is taking 1347 * allocate the page. The expectation is that the caller is taking
1335 * steps that will free more memory. The caller should avoid the page 1348 * steps that will free more memory. The caller should avoid the page
1336 * being used for !PFMEMALLOC purposes. 1349 * being used for !PFMEMALLOC purposes.
1337 */ 1350 */
1338 page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS); 1351 if (alloc_flags & ALLOC_NO_WATERMARKS)
1352 set_page_pfmemalloc(page);
1353 else
1354 clear_page_pfmemalloc(page);
1339 1355
1340 return 0; 1356 return 0;
1341} 1357}
@@ -1950,6 +1966,7 @@ void free_hot_cold_page_list(struct list_head *list, bool cold)
1950void split_page(struct page *page, unsigned int order) 1966void split_page(struct page *page, unsigned int order)
1951{ 1967{
1952 int i; 1968 int i;
1969 gfp_t gfp_mask;
1953 1970
1954 VM_BUG_ON_PAGE(PageCompound(page), page); 1971 VM_BUG_ON_PAGE(PageCompound(page), page);
1955 VM_BUG_ON_PAGE(!page_count(page), page); 1972 VM_BUG_ON_PAGE(!page_count(page), page);
@@ -1963,10 +1980,11 @@ void split_page(struct page *page, unsigned int order)
1963 split_page(virt_to_page(page[0].shadow), order); 1980 split_page(virt_to_page(page[0].shadow), order);
1964#endif 1981#endif
1965 1982
1966 set_page_owner(page, 0, 0); 1983 gfp_mask = get_page_owner_gfp(page);
1984 set_page_owner(page, 0, gfp_mask);
1967 for (i = 1; i < (1 << order); i++) { 1985 for (i = 1; i < (1 << order); i++) {
1968 set_page_refcounted(page + i); 1986 set_page_refcounted(page + i);
1969 set_page_owner(page + i, 0, 0); 1987 set_page_owner(page + i, 0, gfp_mask);
1970 } 1988 }
1971} 1989}
1972EXPORT_SYMBOL_GPL(split_page); 1990EXPORT_SYMBOL_GPL(split_page);
@@ -1996,6 +2014,8 @@ int __isolate_free_page(struct page *page, unsigned int order)
1996 zone->free_area[order].nr_free--; 2014 zone->free_area[order].nr_free--;
1997 rmv_page_order(page); 2015 rmv_page_order(page);
1998 2016
2017 set_page_owner(page, order, __GFP_MOVABLE);
2018
1999 /* Set the pageblock if the isolated page is at least a pageblock */ 2019 /* Set the pageblock if the isolated page is at least a pageblock */
2000 if (order >= pageblock_order - 1) { 2020 if (order >= pageblock_order - 1) {
2001 struct page *endpage = page + (1 << order) - 1; 2021 struct page *endpage = page + (1 << order) - 1;
@@ -2007,7 +2027,7 @@ int __isolate_free_page(struct page *page, unsigned int order)
2007 } 2027 }
2008 } 2028 }
2009 2029
2010 set_page_owner(page, order, 0); 2030
2011 return 1UL << order; 2031 return 1UL << order;
2012} 2032}
2013 2033
@@ -3328,7 +3348,7 @@ refill:
3328 atomic_add(size - 1, &page->_count); 3348 atomic_add(size - 1, &page->_count);
3329 3349
3330 /* reset page count bias and offset to start of new frag */ 3350 /* reset page count bias and offset to start of new frag */
3331 nc->pfmemalloc = page->pfmemalloc; 3351 nc->pfmemalloc = page_is_pfmemalloc(page);
3332 nc->pagecnt_bias = size; 3352 nc->pagecnt_bias = size;
3333 nc->offset = size; 3353 nc->offset = size;
3334 } 3354 }
@@ -5043,6 +5063,10 @@ static unsigned long __meminit zone_spanned_pages_in_node(int nid,
5043{ 5063{
5044 unsigned long zone_start_pfn, zone_end_pfn; 5064 unsigned long zone_start_pfn, zone_end_pfn;
5045 5065
5066 /* When hotadd a new node, the node should be empty */
5067 if (!node_start_pfn && !node_end_pfn)
5068 return 0;
5069
5046 /* Get the start and end of the zone */ 5070 /* Get the start and end of the zone */
5047 zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type]; 5071 zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
5048 zone_end_pfn = arch_zone_highest_possible_pfn[zone_type]; 5072 zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
@@ -5106,6 +5130,10 @@ static unsigned long __meminit zone_absent_pages_in_node(int nid,
5106 unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type]; 5130 unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
5107 unsigned long zone_start_pfn, zone_end_pfn; 5131 unsigned long zone_start_pfn, zone_end_pfn;
5108 5132
5133 /* When hotadd a new node, the node should be empty */
5134 if (!node_start_pfn && !node_end_pfn)
5135 return 0;
5136
5109 zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high); 5137 zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
5110 zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high); 5138 zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
5111 5139
diff --git a/mm/page_owner.c b/mm/page_owner.c
index bd5f842b56d2..983c3a10fa07 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -76,6 +76,13 @@ void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask)
76 __set_bit(PAGE_EXT_OWNER, &page_ext->flags); 76 __set_bit(PAGE_EXT_OWNER, &page_ext->flags);
77} 77}
78 78
79gfp_t __get_page_owner_gfp(struct page *page)
80{
81 struct page_ext *page_ext = lookup_page_ext(page);
82
83 return page_ext->gfp_mask;
84}
85
79static ssize_t 86static ssize_t
80print_page_owner(char __user *buf, size_t count, unsigned long pfn, 87print_page_owner(char __user *buf, size_t count, unsigned long pfn,
81 struct page *page, struct page_ext *page_ext) 88 struct page *page, struct page_ext *page_ext)
diff --git a/mm/shmem.c b/mm/shmem.c
index 4caf8ed24d65..dbe0c1e8349c 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -3363,8 +3363,8 @@ put_path:
3363 * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be 3363 * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
3364 * kernel internal. There will be NO LSM permission checks against the 3364 * kernel internal. There will be NO LSM permission checks against the
3365 * underlying inode. So users of this interface must do LSM checks at a 3365 * underlying inode. So users of this interface must do LSM checks at a
3366 * higher layer. The one user is the big_key implementation. LSM checks 3366 * higher layer. The users are the big_key and shm implementations. LSM
3367 * are provided at the key level rather than the inode level. 3367 * checks are provided at the key or shm level rather than the inode.
3368 * @name: name for dentry (to be seen in /proc/<pid>/maps 3368 * @name: name for dentry (to be seen in /proc/<pid>/maps
3369 * @size: size to be set for the file 3369 * @size: size to be set for the file
3370 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size 3370 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
diff --git a/mm/slab.c b/mm/slab.c
index 200e22412a16..bbd0b47dc6a9 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1603,7 +1603,7 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
1603 } 1603 }
1604 1604
1605 /* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */ 1605 /* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
1606 if (unlikely(page->pfmemalloc)) 1606 if (page_is_pfmemalloc(page))
1607 pfmemalloc_active = true; 1607 pfmemalloc_active = true;
1608 1608
1609 nr_pages = (1 << cachep->gfporder); 1609 nr_pages = (1 << cachep->gfporder);
@@ -1614,7 +1614,7 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
1614 add_zone_page_state(page_zone(page), 1614 add_zone_page_state(page_zone(page),
1615 NR_SLAB_UNRECLAIMABLE, nr_pages); 1615 NR_SLAB_UNRECLAIMABLE, nr_pages);
1616 __SetPageSlab(page); 1616 __SetPageSlab(page);
1617 if (page->pfmemalloc) 1617 if (page_is_pfmemalloc(page))
1618 SetPageSlabPfmemalloc(page); 1618 SetPageSlabPfmemalloc(page);
1619 1619
1620 if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) { 1620 if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 3e5f8f29c286..86831105a09f 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -37,8 +37,7 @@ struct kmem_cache *kmem_cache;
37 SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \ 37 SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
38 SLAB_FAILSLAB) 38 SLAB_FAILSLAB)
39 39
40#define SLAB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \ 40#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | SLAB_NOTRACK)
41 SLAB_CACHE_DMA | SLAB_NOTRACK)
42 41
43/* 42/*
44 * Merge control. If this is set then no merging of slab caches will occur. 43 * Merge control. If this is set then no merging of slab caches will occur.
diff --git a/mm/slub.c b/mm/slub.c
index 816df0016555..f68c0e50f3c0 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1427,7 +1427,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
1427 inc_slabs_node(s, page_to_nid(page), page->objects); 1427 inc_slabs_node(s, page_to_nid(page), page->objects);
1428 page->slab_cache = s; 1428 page->slab_cache = s;
1429 __SetPageSlab(page); 1429 __SetPageSlab(page);
1430 if (page->pfmemalloc) 1430 if (page_is_pfmemalloc(page))
1431 SetPageSlabPfmemalloc(page); 1431 SetPageSlabPfmemalloc(page);
1432 1432
1433 start = page_address(page); 1433 start = page_address(page);
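
This hunk, together with the page_alloc, slab and page-frag hunks earlier, stops reading a raw page->pfmemalloc field and goes through set_page_pfmemalloc()/clear_page_pfmemalloc()/page_is_pfmemalloc() instead, so the flag's storage can change without touching every caller. A sketch of that accessor pattern; the struct layout and encoding below are invented, not the kernel's struct page:

#include <stdbool.h>
#include <stdio.h>

struct page {
    unsigned long flags;
    long index;             /* say the pfmemalloc state is folded in here */
};

static void set_page_pfmemalloc(struct page *p)        { p->index = -1; }
static void clear_page_pfmemalloc(struct page *p)      { p->index = 0;  }
static bool page_is_pfmemalloc(const struct page *p)   { return p->index == -1; }

int main(void)
{
    struct page pg = { 0, 0 };

    set_page_pfmemalloc(&pg);
    /* callers such as the slab allocators only ever ask the helper */
    printf("pfmemalloc? %s\n", page_is_pfmemalloc(&pg) ? "yes" : "no");
    clear_page_pfmemalloc(&pg);
    printf("pfmemalloc? %s\n", page_is_pfmemalloc(&pg) ? "yes" : "no");
    return 0;
}
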
diff --git a/mm/vmscan.c b/mm/vmscan.c
index e61445dce04e..8286938c70de 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -973,22 +973,18 @@ static unsigned long shrink_page_list(struct list_head *page_list,
973 * caller can stall after page list has been processed. 973 * caller can stall after page list has been processed.
974 * 974 *
975 * 2) Global or new memcg reclaim encounters a page that is 975 * 2) Global or new memcg reclaim encounters a page that is
976 * not marked for immediate reclaim or the caller does not 976 * not marked for immediate reclaim, or the caller does not
977 * have __GFP_IO. In this case mark the page for immediate 977 * have __GFP_FS (or __GFP_IO if it's simply going to swap,
978 * not to fs). In this case mark the page for immediate
978 * reclaim and continue scanning. 979 * reclaim and continue scanning.
979 * 980 *
980 * __GFP_IO is checked because a loop driver thread might 981 * Require may_enter_fs because we would wait on fs, which
982 * may not have submitted IO yet. And the loop driver might
981 * enter reclaim, and deadlock if it waits on a page for 983 * enter reclaim, and deadlock if it waits on a page for
982 * which it is needed to do the write (loop masks off 984 * which it is needed to do the write (loop masks off
983 * __GFP_IO|__GFP_FS for this reason); but more thought 985 * __GFP_IO|__GFP_FS for this reason); but more thought
984 * would probably show more reasons. 986 * would probably show more reasons.
985 * 987 *
986 * Don't require __GFP_FS, since we're not going into the
987 * FS, just waiting on its writeback completion. Worryingly,
988 * ext4 gfs2 and xfs allocate pages with
989 * grab_cache_page_write_begin(,,AOP_FLAG_NOFS), so testing
990 * may_enter_fs here is liable to OOM on them.
991 *
992 * 3) Legacy memcg encounters a page that is not already marked 988 * 3) Legacy memcg encounters a page that is not already marked
993 * PageReclaim. memcg does not have any dirty pages 989 * PageReclaim. memcg does not have any dirty pages
994 * throttling so we could easily OOM just because too many 990 * throttling so we could easily OOM just because too many
@@ -1005,7 +1001,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
1005 1001
1006 /* Case 2 above */ 1002 /* Case 2 above */
1007 } else if (sane_reclaim(sc) || 1003 } else if (sane_reclaim(sc) ||
1008 !PageReclaim(page) || !(sc->gfp_mask & __GFP_IO)) { 1004 !PageReclaim(page) || !may_enter_fs) {
1009 /* 1005 /*
1010 * This is slightly racy - end_page_writeback() 1006 * This is slightly racy - end_page_writeback()
1011 * might have just cleared PageReclaim, then 1007 * might have just cleared PageReclaim, then
diff --git a/net/9p/client.c b/net/9p/client.c
index 498454b3c06c..ea79ee9a7348 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -1541,6 +1541,7 @@ p9_client_read(struct p9_fid *fid, u64 offset, struct iov_iter *to, int *err)
1541 struct p9_client *clnt = fid->clnt; 1541 struct p9_client *clnt = fid->clnt;
1542 struct p9_req_t *req; 1542 struct p9_req_t *req;
1543 int total = 0; 1543 int total = 0;
1544 *err = 0;
1544 1545
1545 p9_debug(P9_DEBUG_9P, ">>> TREAD fid %d offset %llu %d\n", 1546 p9_debug(P9_DEBUG_9P, ">>> TREAD fid %d offset %llu %d\n",
1546 fid->fid, (unsigned long long) offset, (int)iov_iter_count(to)); 1547 fid->fid, (unsigned long long) offset, (int)iov_iter_count(to));
@@ -1620,6 +1621,7 @@ p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err)
1620 struct p9_client *clnt = fid->clnt; 1621 struct p9_client *clnt = fid->clnt;
1621 struct p9_req_t *req; 1622 struct p9_req_t *req;
1622 int total = 0; 1623 int total = 0;
1624 *err = 0;
1623 1625
1624 p9_debug(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu count %zd\n", 1626 p9_debug(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu count %zd\n",
1625 fid->fid, (unsigned long long) offset, 1627 fid->fid, (unsigned long long) offset,
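
Both p9_client_read() and p9_client_write() above now zero *err on entry, so a stale value left in the caller's variable can never be mistaken for a failure of this call. The same out-parameter rule in a self-contained sketch; read_some() and its size limit are invented:

#include <stdio.h>

static int read_some(int count, int *err)
{
    *err = 0;                   /* what the patch adds: clear on entry */

    if (count > 4096) {
        *err = -22;             /* only set on a real error */
        return 0;
    }
    return count;               /* success paths leave *err == 0 */
}

int main(void)
{
    int err = -5;               /* stale value from an earlier call */
    int n = read_some(100, &err);

    printf("read %d bytes, err=%d\n", n, err);   /* err is 0, not -5 */
    return 0;
}
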
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 9dd49ca67dbc..6e70ddb158b4 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -704,6 +704,7 @@ static void p9_virtio_remove(struct virtio_device *vdev)
704 704
705 mutex_unlock(&virtio_9p_lock); 705 mutex_unlock(&virtio_9p_lock);
706 706
707 vdev->config->reset(vdev);
707 vdev->config->del_vqs(vdev); 708 vdev->config->del_vqs(vdev);
708 709
709 sysfs_remove_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr); 710 sysfs_remove_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr);
diff --git a/net/ax25/ax25_subr.c b/net/ax25/ax25_subr.c
index 1997538a5d23..3b78e8473a01 100644
--- a/net/ax25/ax25_subr.c
+++ b/net/ax25/ax25_subr.c
@@ -264,6 +264,7 @@ void ax25_disconnect(ax25_cb *ax25, int reason)
264{ 264{
265 ax25_clear_queues(ax25); 265 ax25_clear_queues(ax25);
266 266
267 ax25_stop_heartbeat(ax25);
267 ax25_stop_t1timer(ax25); 268 ax25_stop_t1timer(ax25);
268 ax25_stop_t2timer(ax25); 269 ax25_stop_t2timer(ax25);
269 ax25_stop_t3timer(ax25); 270 ax25_stop_t3timer(ax25);
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index fb54e6aed096..6d0b471eede8 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -1138,6 +1138,9 @@ void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv,
1138 * @bat_priv: the bat priv with all the soft interface information 1138 * @bat_priv: the bat priv with all the soft interface information
1139 * @skb: packet to check 1139 * @skb: packet to check
1140 * @hdr_size: size of the encapsulation header 1140 * @hdr_size: size of the encapsulation header
1141 *
1142 * Returns true if the packet was snooped and consumed by DAT. False if the
1143 * packet has to be delivered to the interface
1141 */ 1144 */
1142bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv, 1145bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
1143 struct sk_buff *skb, int hdr_size) 1146 struct sk_buff *skb, int hdr_size)
@@ -1145,7 +1148,7 @@ bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
1145 uint16_t type; 1148 uint16_t type;
1146 __be32 ip_src, ip_dst; 1149 __be32 ip_src, ip_dst;
1147 uint8_t *hw_src, *hw_dst; 1150 uint8_t *hw_src, *hw_dst;
1148 bool ret = false; 1151 bool dropped = false;
1149 unsigned short vid; 1152 unsigned short vid;
1150 1153
1151 if (!atomic_read(&bat_priv->distributed_arp_table)) 1154 if (!atomic_read(&bat_priv->distributed_arp_table))
@@ -1174,12 +1177,17 @@ bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
1174 /* if this REPLY is directed to a client of mine, let's deliver the 1177 /* if this REPLY is directed to a client of mine, let's deliver the
1175 * packet to the interface 1178 * packet to the interface
1176 */ 1179 */
1177 ret = !batadv_is_my_client(bat_priv, hw_dst, vid); 1180 dropped = !batadv_is_my_client(bat_priv, hw_dst, vid);
1181
1182 /* if this REPLY is sent on behalf of a client of mine, let's drop the
1183 * packet because the client will reply by itself
1184 */
1185 dropped |= batadv_is_my_client(bat_priv, hw_src, vid);
1178out: 1186out:
1179 if (ret) 1187 if (dropped)
1180 kfree_skb(skb); 1188 kfree_skb(skb);
1181 /* if ret == false -> packet has to be delivered to the interface */ 1189 /* if dropped == false -> deliver to the interface */
1182 return ret; 1190 return dropped;
1183} 1191}
1184 1192
1185/** 1193/**
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index bb0158620628..cffa92dd9877 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -439,6 +439,8 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
439 439
440 INIT_HLIST_NODE(&gw_node->list); 440 INIT_HLIST_NODE(&gw_node->list);
441 gw_node->orig_node = orig_node; 441 gw_node->orig_node = orig_node;
442 gw_node->bandwidth_down = ntohl(gateway->bandwidth_down);
443 gw_node->bandwidth_up = ntohl(gateway->bandwidth_up);
442 atomic_set(&gw_node->refcount, 1); 444 atomic_set(&gw_node->refcount, 1);
443 445
444 spin_lock_bh(&bat_priv->gw.list_lock); 446 spin_lock_bh(&bat_priv->gw.list_lock);
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index c002961da75d..a2fc843c2243 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -479,6 +479,9 @@ out:
479 */ 479 */
480void batadv_softif_vlan_free_ref(struct batadv_softif_vlan *vlan) 480void batadv_softif_vlan_free_ref(struct batadv_softif_vlan *vlan)
481{ 481{
482 if (!vlan)
483 return;
484
482 if (atomic_dec_and_test(&vlan->refcount)) { 485 if (atomic_dec_and_test(&vlan->refcount)) {
483 spin_lock_bh(&vlan->bat_priv->softif_vlan_list_lock); 486 spin_lock_bh(&vlan->bat_priv->softif_vlan_list_lock);
484 hlist_del_rcu(&vlan->list); 487 hlist_del_rcu(&vlan->list);
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index b4824951010b..5809b39c1922 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -594,6 +594,12 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
594 594
595 /* increase the refcounter of the related vlan */ 595 /* increase the refcounter of the related vlan */
596 vlan = batadv_softif_vlan_get(bat_priv, vid); 596 vlan = batadv_softif_vlan_get(bat_priv, vid);
597 if (WARN(!vlan, "adding TT local entry %pM to non-existent VLAN %d",
598 addr, BATADV_PRINT_VID(vid))) {
599 kfree(tt_local);
600 tt_local = NULL;
601 goto out;
602 }
597 603
598 batadv_dbg(BATADV_DBG_TT, bat_priv, 604 batadv_dbg(BATADV_DBG_TT, bat_priv,
599 "Creating new local tt entry: %pM (vid: %d, ttvn: %d)\n", 605 "Creating new local tt entry: %pM (vid: %d, ttvn: %d)\n",
@@ -1034,6 +1040,7 @@ uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv,
1034 struct batadv_tt_local_entry *tt_local_entry; 1040 struct batadv_tt_local_entry *tt_local_entry;
1035 uint16_t flags, curr_flags = BATADV_NO_FLAGS; 1041 uint16_t flags, curr_flags = BATADV_NO_FLAGS;
1036 struct batadv_softif_vlan *vlan; 1042 struct batadv_softif_vlan *vlan;
1043 void *tt_entry_exists;
1037 1044
1038 tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid); 1045 tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
1039 if (!tt_local_entry) 1046 if (!tt_local_entry)
@@ -1061,11 +1068,22 @@ uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv,
1061 * immediately purge it 1068 * immediately purge it
1062 */ 1069 */
1063 batadv_tt_local_event(bat_priv, tt_local_entry, BATADV_TT_CLIENT_DEL); 1070 batadv_tt_local_event(bat_priv, tt_local_entry, BATADV_TT_CLIENT_DEL);
1064 hlist_del_rcu(&tt_local_entry->common.hash_entry); 1071
1072 tt_entry_exists = batadv_hash_remove(bat_priv->tt.local_hash,
1073 batadv_compare_tt,
1074 batadv_choose_tt,
1075 &tt_local_entry->common);
1076 if (!tt_entry_exists)
1077 goto out;
1078
1079 /* extra call to free the local tt entry */
1065 batadv_tt_local_entry_free_ref(tt_local_entry); 1080 batadv_tt_local_entry_free_ref(tt_local_entry);
1066 1081
1067 /* decrease the reference held for this vlan */ 1082 /* decrease the reference held for this vlan */
1068 vlan = batadv_softif_vlan_get(bat_priv, vid); 1083 vlan = batadv_softif_vlan_get(bat_priv, vid);
1084 if (!vlan)
1085 goto out;
1086
1069 batadv_softif_vlan_free_ref(vlan); 1087 batadv_softif_vlan_free_ref(vlan);
1070 batadv_softif_vlan_free_ref(vlan); 1088 batadv_softif_vlan_free_ref(vlan);
1071 1089
@@ -1166,8 +1184,10 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
1166 /* decrease the reference held for this vlan */ 1184 /* decrease the reference held for this vlan */
1167 vlan = batadv_softif_vlan_get(bat_priv, 1185 vlan = batadv_softif_vlan_get(bat_priv,
1168 tt_common_entry->vid); 1186 tt_common_entry->vid);
1169 batadv_softif_vlan_free_ref(vlan); 1187 if (vlan) {
1170 batadv_softif_vlan_free_ref(vlan); 1188 batadv_softif_vlan_free_ref(vlan);
1189 batadv_softif_vlan_free_ref(vlan);
1190 }
1171 1191
1172 batadv_tt_local_entry_free_ref(tt_local); 1192 batadv_tt_local_entry_free_ref(tt_local);
1173 } 1193 }
@@ -3207,8 +3227,10 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
3207 3227
3208 /* decrease the reference held for this vlan */ 3228 /* decrease the reference held for this vlan */
3209 vlan = batadv_softif_vlan_get(bat_priv, tt_common->vid); 3229 vlan = batadv_softif_vlan_get(bat_priv, tt_common->vid);
3210 batadv_softif_vlan_free_ref(vlan); 3230 if (vlan) {
3211 batadv_softif_vlan_free_ref(vlan); 3231 batadv_softif_vlan_free_ref(vlan);
3232 batadv_softif_vlan_free_ref(vlan);
3233 }
3212 3234
3213 batadv_tt_local_entry_free_ref(tt_local); 3235 batadv_tt_local_entry_free_ref(tt_local);
3214 } 3236 }
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 7998fb279165..92720f3fe573 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -7820,7 +7820,7 @@ void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
7820 /* Make sure we copy only the significant bytes based on the 7820 /* Make sure we copy only the significant bytes based on the
7821 * encryption key size, and set the rest of the value to zeroes. 7821 * encryption key size, and set the rest of the value to zeroes.
7822 */ 7822 */
7823 memcpy(ev.key.val, key->val, sizeof(key->enc_size)); 7823 memcpy(ev.key.val, key->val, key->enc_size);
7824 memset(ev.key.val + key->enc_size, 0, 7824 memset(ev.key.val + key->enc_size, 0,
7825 sizeof(ev.key.val) - key->enc_size); 7825 sizeof(ev.key.val) - key->enc_size);
7826 7826
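
The mgmt_new_ltk() fix above is the classic sizeof slip: sizeof(key->enc_size) is the size of the length field itself (one byte), not the number of significant key bytes, so only the first byte of the key was copied. A standalone illustration of the buggy and fixed forms, with an invented struct layout:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct key {
    uint8_t enc_size;           /* how many bytes of val are significant */
    uint8_t val[16];
};

int main(void)
{
    struct key k = { .enc_size = 16 };
    uint8_t out[16] = { 0 };

    memset(k.val, 0xAB, sizeof(k.val));

    /* Buggy form: sizeof(k.enc_size) is 1, the size of the length field,
     * so only one byte is copied. */
    memcpy(out, k.val, sizeof(k.enc_size));
    printf("buggy copy:  %zu byte(s)\n", sizeof(k.enc_size));

    /* Fixed form: use the value of the length field. */
    memcpy(out, k.val, k.enc_size);
    printf("fixed copy:  %u byte(s), out[15]=0x%02x\n",
           (unsigned)k.enc_size, (unsigned)out[15]);
    return 0;
}
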
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 3d0f7d2a0616..ad82324f710f 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -2312,6 +2312,10 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
2312 return 1; 2312 return 1;
2313 2313
2314 chan = conn->smp; 2314 chan = conn->smp;
2315 if (!chan) {
2316 BT_ERR("SMP security requested but not available");
2317 return 1;
2318 }
2315 2319
2316 if (!hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED)) 2320 if (!hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED))
2317 return 1; 2321 return 1;
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index e97572b5d2cc..fa7bfced888e 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -37,14 +37,30 @@ static inline int should_deliver(const struct net_bridge_port *p,
37 37
38int br_dev_queue_push_xmit(struct sock *sk, struct sk_buff *skb) 38int br_dev_queue_push_xmit(struct sock *sk, struct sk_buff *skb)
39{ 39{
40 if (!is_skb_forwardable(skb->dev, skb)) { 40 if (!is_skb_forwardable(skb->dev, skb))
41 kfree_skb(skb); 41 goto drop;
42 } else { 42
43 skb_push(skb, ETH_HLEN); 43 skb_push(skb, ETH_HLEN);
44 br_drop_fake_rtable(skb); 44 br_drop_fake_rtable(skb);
45 dev_queue_xmit(skb); 45 skb_sender_cpu_clear(skb);
46
47 if (skb->ip_summed == CHECKSUM_PARTIAL &&
48 (skb->protocol == htons(ETH_P_8021Q) ||
49 skb->protocol == htons(ETH_P_8021AD))) {
50 int depth;
51
52 if (!__vlan_get_protocol(skb, skb->protocol, &depth))
53 goto drop;
54
55 skb_set_network_header(skb, depth);
46 } 56 }
47 57
58 dev_queue_xmit(skb);
59
60 return 0;
61
62drop:
63 kfree_skb(skb);
48 return 0; 64 return 0;
49} 65}
50EXPORT_SYMBOL_GPL(br_dev_queue_push_xmit); 66EXPORT_SYMBOL_GPL(br_dev_queue_push_xmit);
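
br_dev_queue_push_xmit() above is restructured around a single drop: label: the forwardability check and the new VLAN/checksum fix-up both bail out to one place that frees the skb. A small sketch of that single-exit cleanup idiom; the buffer type and checks are stand-ins, not bridge code:

#include <stdio.h>
#include <stdlib.h>

struct buf { char *data; size_t len; };

static int forwardable(const struct buf *b) { return b->len <= 1500; }
static int fixup_headers(struct buf *b)     { return b->data != NULL; }
static void xmit(struct buf *b)             { printf("sent %zu bytes\n", b->len); }

static int queue_xmit(struct buf *b)
{
    if (!forwardable(b))
        goto drop;

    if (!fixup_headers(b))
        goto drop;

    xmit(b);
    free(b->data);
    return 0;

drop:
    free(b->data);              /* one place owns the error-path cleanup */
    return 0;
}

int main(void)
{
    struct buf b = { malloc(8), 7 };

    if (!b.data)
        return 1;
    return queue_xmit(&b);
}
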
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index e29ad70b3000..c94321955db7 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -323,6 +323,7 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
323 struct net_bridge_port_group *p; 323 struct net_bridge_port_group *p;
324 struct net_bridge_port_group __rcu **pp; 324 struct net_bridge_port_group __rcu **pp;
325 struct net_bridge_mdb_htable *mdb; 325 struct net_bridge_mdb_htable *mdb;
326 unsigned long now = jiffies;
326 int err; 327 int err;
327 328
328 mdb = mlock_dereference(br->mdb, br); 329 mdb = mlock_dereference(br->mdb, br);
@@ -347,8 +348,9 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
347 if (unlikely(!p)) 348 if (unlikely(!p))
348 return -ENOMEM; 349 return -ENOMEM;
349 rcu_assign_pointer(*pp, p); 350 rcu_assign_pointer(*pp, p);
351 if (state == MDB_TEMPORARY)
352 mod_timer(&p->timer, now + br->multicast_membership_interval);
350 353
351 br_mdb_notify(br->dev, port, group, RTM_NEWMDB);
352 return 0; 354 return 0;
353} 355}
354 356
@@ -371,6 +373,7 @@ static int __br_mdb_add(struct net *net, struct net_bridge *br,
371 if (!p || p->br != br || p->state == BR_STATE_DISABLED) 373 if (!p || p->br != br || p->state == BR_STATE_DISABLED)
372 return -EINVAL; 374 return -EINVAL;
373 375
376 memset(&ip, 0, sizeof(ip));
374 ip.proto = entry->addr.proto; 377 ip.proto = entry->addr.proto;
375 if (ip.proto == htons(ETH_P_IP)) 378 if (ip.proto == htons(ETH_P_IP))
376 ip.u.ip4 = entry->addr.u.ip4; 379 ip.u.ip4 = entry->addr.u.ip4;
@@ -417,20 +420,14 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
417 if (!netif_running(br->dev) || br->multicast_disabled) 420 if (!netif_running(br->dev) || br->multicast_disabled)
418 return -EINVAL; 421 return -EINVAL;
419 422
423 memset(&ip, 0, sizeof(ip));
420 ip.proto = entry->addr.proto; 424 ip.proto = entry->addr.proto;
421 if (ip.proto == htons(ETH_P_IP)) { 425 if (ip.proto == htons(ETH_P_IP))
422 if (timer_pending(&br->ip4_other_query.timer))
423 return -EBUSY;
424
425 ip.u.ip4 = entry->addr.u.ip4; 426 ip.u.ip4 = entry->addr.u.ip4;
426#if IS_ENABLED(CONFIG_IPV6) 427#if IS_ENABLED(CONFIG_IPV6)
427 } else { 428 else
428 if (timer_pending(&br->ip6_other_query.timer))
429 return -EBUSY;
430
431 ip.u.ip6 = entry->addr.u.ip6; 429 ip.u.ip6 = entry->addr.u.ip6;
432#endif 430#endif
433 }
434 431
435 spin_lock_bh(&br->multicast_lock); 432 spin_lock_bh(&br->multicast_lock);
436 mdb = mlock_dereference(br->mdb, br); 433 mdb = mlock_dereference(br->mdb, br);
@@ -448,6 +445,7 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
448 if (p->port->state == BR_STATE_DISABLED) 445 if (p->port->state == BR_STATE_DISABLED)
449 goto unlock; 446 goto unlock;
450 447
448 entry->state = p->state;
451 rcu_assign_pointer(*pp, p->next); 449 rcu_assign_pointer(*pp, p->next);
452 hlist_del_init(&p->mglist); 450 hlist_del_init(&p->mglist);
453 del_timer(&p->timer); 451 del_timer(&p->timer);
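
The two memset(&ip, 0, sizeof(ip)) lines added above make the on-stack lookup key start out fully zeroed before selected members are filled in; any member or padding left untouched would hold stack garbage and could make two logically equal keys compare unequal. A sketch of the rule with an invented key layout and a byte-wise compare:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct group_key {
    uint16_t proto;
    union {
        uint32_t ip4;
        uint8_t  ip6[16];
    } u;
};

static int keys_equal(const struct group_key *a, const struct group_key *b)
{
    return memcmp(a, b, sizeof(*a)) == 0;
}

int main(void)
{
    struct group_key stored, lookup;

    memset(&stored, 0, sizeof(stored));
    stored.proto = 0x0800;
    stored.u.ip4 = 0xe0000001;          /* 224.0.0.1 */

    memset(&lookup, 0, sizeof(lookup)); /* the line the patch adds */
    lookup.proto = 0x0800;
    lookup.u.ip4 = 0xe0000001;

    printf("match: %s\n", keys_equal(&stored, &lookup) ? "yes" : "no");
    return 0;
}
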
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 742a6c27d7a2..1285eaf5dc22 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -39,6 +39,16 @@ static void br_multicast_start_querier(struct net_bridge *br,
39 struct bridge_mcast_own_query *query); 39 struct bridge_mcast_own_query *query);
40static void br_multicast_add_router(struct net_bridge *br, 40static void br_multicast_add_router(struct net_bridge *br,
41 struct net_bridge_port *port); 41 struct net_bridge_port *port);
42static void br_ip4_multicast_leave_group(struct net_bridge *br,
43 struct net_bridge_port *port,
44 __be32 group,
45 __u16 vid);
46#if IS_ENABLED(CONFIG_IPV6)
47static void br_ip6_multicast_leave_group(struct net_bridge *br,
48 struct net_bridge_port *port,
49 const struct in6_addr *group,
50 __u16 vid);
51#endif
42unsigned int br_mdb_rehash_seq; 52unsigned int br_mdb_rehash_seq;
43 53
44static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b) 54static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
@@ -1010,9 +1020,15 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
1010 continue; 1020 continue;
1011 } 1021 }
1012 1022
1013 err = br_ip4_multicast_add_group(br, port, group, vid); 1023 if ((type == IGMPV3_CHANGE_TO_INCLUDE ||
1014 if (err) 1024 type == IGMPV3_MODE_IS_INCLUDE) &&
1015 break; 1025 ntohs(grec->grec_nsrcs) == 0) {
1026 br_ip4_multicast_leave_group(br, port, group, vid);
1027 } else {
1028 err = br_ip4_multicast_add_group(br, port, group, vid);
1029 if (err)
1030 break;
1031 }
1016 } 1032 }
1017 1033
1018 return err; 1034 return err;
@@ -1071,10 +1087,17 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
1071 continue; 1087 continue;
1072 } 1088 }
1073 1089
1074 err = br_ip6_multicast_add_group(br, port, &grec->grec_mca, 1090 if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
1075 vid); 1091 grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
1076 if (err) 1092 ntohs(*nsrcs) == 0) {
1077 break; 1093 br_ip6_multicast_leave_group(br, port, &grec->grec_mca,
1094 vid);
1095 } else {
1096 err = br_ip6_multicast_add_group(br, port,
1097 &grec->grec_mca, vid);
1098 if (!err)
1099 break;
1100 }
1078 } 1101 }
1079 1102
1080 return err; 1103 return err;
@@ -1393,8 +1416,7 @@ br_multicast_leave_group(struct net_bridge *br,
1393 1416
1394 spin_lock(&br->multicast_lock); 1417 spin_lock(&br->multicast_lock);
1395 if (!netif_running(br->dev) || 1418 if (!netif_running(br->dev) ||
1396 (port && port->state == BR_STATE_DISABLED) || 1419 (port && port->state == BR_STATE_DISABLED))
1397 timer_pending(&other_query->timer))
1398 goto out; 1420 goto out;
1399 1421
1400 mdb = mlock_dereference(br->mdb, br); 1422 mdb = mlock_dereference(br->mdb, br);
@@ -1402,6 +1424,31 @@ br_multicast_leave_group(struct net_bridge *br,
1402 if (!mp) 1424 if (!mp)
1403 goto out; 1425 goto out;
1404 1426
1427 if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) {
1428 struct net_bridge_port_group __rcu **pp;
1429
1430 for (pp = &mp->ports;
1431 (p = mlock_dereference(*pp, br)) != NULL;
1432 pp = &p->next) {
1433 if (p->port != port)
1434 continue;
1435
1436 rcu_assign_pointer(*pp, p->next);
1437 hlist_del_init(&p->mglist);
1438 del_timer(&p->timer);
1439 call_rcu_bh(&p->rcu, br_multicast_free_pg);
1440 br_mdb_notify(br->dev, port, group, RTM_DELMDB);
1441
1442 if (!mp->ports && !mp->mglist &&
1443 netif_running(br->dev))
1444 mod_timer(&mp->timer, jiffies);
1445 }
1446 goto out;
1447 }
1448
1449 if (timer_pending(&other_query->timer))
1450 goto out;
1451
1405 if (br->multicast_querier) { 1452 if (br->multicast_querier) {
1406 __br_multicast_send_query(br, port, &mp->addr); 1453 __br_multicast_send_query(br, port, &mp->addr);
1407 1454
@@ -1427,28 +1474,6 @@ br_multicast_leave_group(struct net_bridge *br,
1427 } 1474 }
1428 } 1475 }
1429 1476
1430 if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) {
1431 struct net_bridge_port_group __rcu **pp;
1432
1433 for (pp = &mp->ports;
1434 (p = mlock_dereference(*pp, br)) != NULL;
1435 pp = &p->next) {
1436 if (p->port != port)
1437 continue;
1438
1439 rcu_assign_pointer(*pp, p->next);
1440 hlist_del_init(&p->mglist);
1441 del_timer(&p->timer);
1442 call_rcu_bh(&p->rcu, br_multicast_free_pg);
1443 br_mdb_notify(br->dev, port, group, RTM_DELMDB);
1444
1445 if (!mp->ports && !mp->mglist &&
1446 netif_running(br->dev))
1447 mod_timer(&mp->timer, jiffies);
1448 }
1449 goto out;
1450 }
1451
1452 now = jiffies; 1477 now = jiffies;
1453 time = now + br->multicast_last_member_count * 1478 time = now + br->multicast_last_member_count *
1454 br->multicast_last_member_interval; 1479 br->multicast_last_member_interval;
@@ -1566,7 +1591,7 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
1566 break; 1591 break;
1567 } 1592 }
1568 1593
1569 if (skb_trimmed) 1594 if (skb_trimmed && skb_trimmed != skb)
1570 kfree_skb(skb_trimmed); 1595 kfree_skb(skb_trimmed);
1571 1596
1572 return err; 1597 return err;
@@ -1611,7 +1636,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
1611 break; 1636 break;
1612 } 1637 }
1613 1638
1614 if (skb_trimmed) 1639 if (skb_trimmed && skb_trimmed != skb)
1615 kfree_skb(skb_trimmed); 1640 kfree_skb(skb_trimmed);
1616 1641
1617 return err; 1642 return err;
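
The IGMPv3 and MLDv2 report hunks above add one rule: a CHANGE_TO_INCLUDE or MODE_IS_INCLUDE group record carrying zero sources is treated as a leave rather than a join. That decision reduced to a small predicate; the record-type constants follow the RFC 3376 numbering, everything else is illustrative:

#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>

enum { MODE_IS_INCLUDE = 1, MODE_IS_EXCLUDE = 2,
       CHANGE_TO_INCLUDE = 3, CHANGE_TO_EXCLUDE = 4 };

static bool record_is_leave(uint8_t type, uint16_t nsrcs)
{
    return (type == CHANGE_TO_INCLUDE || type == MODE_IS_INCLUDE) &&
           nsrcs == 0;
}

int main(void)
{
    printf("TO_INCLUDE, 0 srcs -> %s\n",
           record_is_leave(CHANGE_TO_INCLUDE, 0) ? "leave" : "join");
    printf("TO_EXCLUDE, 0 srcs -> %s\n",
           record_is_leave(CHANGE_TO_EXCLUDE, 0) ? "leave" : "join");
    return 0;
}
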
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index d89f4fac0bc5..c8b9bcfe997e 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -111,7 +111,7 @@ static inline __be16 pppoe_proto(const struct sk_buff *skb)
111/* largest possible L2 header, see br_nf_dev_queue_xmit() */ 111/* largest possible L2 header, see br_nf_dev_queue_xmit() */
112#define NF_BRIDGE_MAX_MAC_HEADER_LENGTH (PPPOE_SES_HLEN + ETH_HLEN) 112#define NF_BRIDGE_MAX_MAC_HEADER_LENGTH (PPPOE_SES_HLEN + ETH_HLEN)
113 113
114#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4) 114#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4) || IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
115struct brnf_frag_data { 115struct brnf_frag_data {
116 char mac[NF_BRIDGE_MAX_MAC_HEADER_LENGTH]; 116 char mac[NF_BRIDGE_MAX_MAC_HEADER_LENGTH];
117 u8 encap_size; 117 u8 encap_size;
@@ -694,6 +694,7 @@ static int br_nf_push_frag_xmit(struct sock *sk, struct sk_buff *skb)
694} 694}
695#endif 695#endif
696 696
697#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
697static int br_nf_ip_fragment(struct sock *sk, struct sk_buff *skb, 698static int br_nf_ip_fragment(struct sock *sk, struct sk_buff *skb,
698 int (*output)(struct sock *, struct sk_buff *)) 699 int (*output)(struct sock *, struct sk_buff *))
699{ 700{
@@ -712,6 +713,7 @@ static int br_nf_ip_fragment(struct sock *sk, struct sk_buff *skb,
712 713
713 return ip_do_fragment(sk, skb, output); 714 return ip_do_fragment(sk, skb, output);
714} 715}
716#endif
715 717
716static unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb) 718static unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb)
717{ 719{
@@ -742,7 +744,7 @@ static int br_nf_dev_queue_xmit(struct sock *sk, struct sk_buff *skb)
742 struct brnf_frag_data *data; 744 struct brnf_frag_data *data;
743 745
744 if (br_validate_ipv4(skb)) 746 if (br_validate_ipv4(skb))
745 return NF_DROP; 747 goto drop;
746 748
747 IPCB(skb)->frag_max_size = nf_bridge->frag_max_size; 749 IPCB(skb)->frag_max_size = nf_bridge->frag_max_size;
748 750
@@ -767,7 +769,7 @@ static int br_nf_dev_queue_xmit(struct sock *sk, struct sk_buff *skb)
767 struct brnf_frag_data *data; 769 struct brnf_frag_data *data;
768 770
769 if (br_validate_ipv6(skb)) 771 if (br_validate_ipv6(skb))
770 return NF_DROP; 772 goto drop;
771 773
772 IP6CB(skb)->frag_max_size = nf_bridge->frag_max_size; 774 IP6CB(skb)->frag_max_size = nf_bridge->frag_max_size;
773 775
@@ -782,12 +784,16 @@ static int br_nf_dev_queue_xmit(struct sock *sk, struct sk_buff *skb)
782 784
783 if (v6ops) 785 if (v6ops)
784 return v6ops->fragment(sk, skb, br_nf_push_frag_xmit); 786 return v6ops->fragment(sk, skb, br_nf_push_frag_xmit);
785 else 787
786 return -EMSGSIZE; 788 kfree_skb(skb);
789 return -EMSGSIZE;
787 } 790 }
788#endif 791#endif
789 nf_bridge_info_free(skb); 792 nf_bridge_info_free(skb);
790 return br_dev_queue_push_xmit(sk, skb); 793 return br_dev_queue_push_xmit(sk, skb);
794 drop:
795 kfree_skb(skb);
796 return 0;
791} 797}
792 798
793/* PF_BRIDGE/POST_ROUTING ********************************************/ 799/* PF_BRIDGE/POST_ROUTING ********************************************/
diff --git a/net/bridge/br_netfilter_ipv6.c b/net/bridge/br_netfilter_ipv6.c
index 6d12d2675c80..13b7d1e3d185 100644
--- a/net/bridge/br_netfilter_ipv6.c
+++ b/net/bridge/br_netfilter_ipv6.c
@@ -104,7 +104,7 @@ int br_validate_ipv6(struct sk_buff *skb)
104{ 104{
105 const struct ipv6hdr *hdr; 105 const struct ipv6hdr *hdr;
106 struct net_device *dev = skb->dev; 106 struct net_device *dev = skb->dev;
107 struct inet6_dev *idev = in6_dev_get(skb->dev); 107 struct inet6_dev *idev = __in6_dev_get(skb->dev);
108 u32 pkt_len; 108 u32 pkt_len;
109 u8 ip6h_len = sizeof(struct ipv6hdr); 109 u8 ip6h_len = sizeof(struct ipv6hdr);
110 110
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 6b67ed3831de..4d74a0639c4c 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -112,6 +112,8 @@ static inline size_t br_port_info_size(void)
112 + nla_total_size(1) /* IFLA_BRPORT_FAST_LEAVE */ 112 + nla_total_size(1) /* IFLA_BRPORT_FAST_LEAVE */
113 + nla_total_size(1) /* IFLA_BRPORT_LEARNING */ 113 + nla_total_size(1) /* IFLA_BRPORT_LEARNING */
114 + nla_total_size(1) /* IFLA_BRPORT_UNICAST_FLOOD */ 114 + nla_total_size(1) /* IFLA_BRPORT_UNICAST_FLOOD */
115 + nla_total_size(1) /* IFLA_BRPORT_PROXYARP */
116 + nla_total_size(1) /* IFLA_BRPORT_PROXYARP_WIFI */
115 + 0; 117 + 0;
116} 118}
117 119
@@ -457,6 +459,8 @@ static int br_afspec(struct net_bridge *br,
457 if (nla_len(attr) != sizeof(struct bridge_vlan_info)) 459 if (nla_len(attr) != sizeof(struct bridge_vlan_info))
458 return -EINVAL; 460 return -EINVAL;
459 vinfo = nla_data(attr); 461 vinfo = nla_data(attr);
462 if (!vinfo->vid || vinfo->vid >= VLAN_VID_MASK)
463 return -EINVAL;
460 if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) { 464 if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
461 if (vinfo_start) 465 if (vinfo_start)
462 return -EINVAL; 466 return -EINVAL;
@@ -504,6 +508,8 @@ static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = {
504 [IFLA_BRPORT_FAST_LEAVE]= { .type = NLA_U8 }, 508 [IFLA_BRPORT_FAST_LEAVE]= { .type = NLA_U8 },
505 [IFLA_BRPORT_LEARNING] = { .type = NLA_U8 }, 509 [IFLA_BRPORT_LEARNING] = { .type = NLA_U8 },
506 [IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 }, 510 [IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 },
511 [IFLA_BRPORT_PROXYARP] = { .type = NLA_U8 },
512 [IFLA_BRPORT_PROXYARP_WIFI] = { .type = NLA_U8 },
507}; 513};
508 514
509/* Change the state of the port and notify spanning tree */ 515/* Change the state of the port and notify spanning tree */
@@ -691,9 +697,17 @@ static int br_port_slave_changelink(struct net_device *brdev,
691 struct nlattr *tb[], 697 struct nlattr *tb[],
692 struct nlattr *data[]) 698 struct nlattr *data[])
693{ 699{
700 struct net_bridge *br = netdev_priv(brdev);
701 int ret;
702
694 if (!data) 703 if (!data)
695 return 0; 704 return 0;
696 return br_setport(br_port_get_rtnl(dev), data); 705
706 spin_lock_bh(&br->lock);
707 ret = br_setport(br_port_get_rtnl(dev), data);
708 spin_unlock_bh(&br->lock);
709
710 return ret;
697} 711}
698 712
699static int br_port_fill_slave_info(struct sk_buff *skb, 713static int br_port_fill_slave_info(struct sk_buff *skb,
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index b4b6dab9c285..ed74ffaa851f 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -209,8 +209,9 @@ void br_transmit_config(struct net_bridge_port *p)
209 br_send_config_bpdu(p, &bpdu); 209 br_send_config_bpdu(p, &bpdu);
210 p->topology_change_ack = 0; 210 p->topology_change_ack = 0;
211 p->config_pending = 0; 211 p->config_pending = 0;
212 mod_timer(&p->hold_timer, 212 if (p->br->stp_enabled == BR_KERNEL_STP)
213 round_jiffies(jiffies + BR_HOLD_TIME)); 213 mod_timer(&p->hold_timer,
214 round_jiffies(jiffies + BR_HOLD_TIME));
214 } 215 }
215} 216}
216 217
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index a2730e7196cd..4ca449a16132 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -48,7 +48,8 @@ void br_stp_enable_bridge(struct net_bridge *br)
48 struct net_bridge_port *p; 48 struct net_bridge_port *p;
49 49
50 spin_lock_bh(&br->lock); 50 spin_lock_bh(&br->lock);
51 mod_timer(&br->hello_timer, jiffies + br->hello_time); 51 if (br->stp_enabled == BR_KERNEL_STP)
52 mod_timer(&br->hello_timer, jiffies + br->hello_time);
52 mod_timer(&br->gc_timer, jiffies + HZ/10); 53 mod_timer(&br->gc_timer, jiffies + HZ/10);
53 54
54 br_config_bpdu_generation(br); 55 br_config_bpdu_generation(br);
@@ -127,6 +128,7 @@ static void br_stp_start(struct net_bridge *br)
127 int r; 128 int r;
128 char *argv[] = { BR_STP_PROG, br->dev->name, "start", NULL }; 129 char *argv[] = { BR_STP_PROG, br->dev->name, "start", NULL };
129 char *envp[] = { NULL }; 130 char *envp[] = { NULL };
131 struct net_bridge_port *p;
130 132
131 r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC); 133 r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
132 134
@@ -140,6 +142,10 @@ static void br_stp_start(struct net_bridge *br)
140 if (r == 0) { 142 if (r == 0) {
141 br->stp_enabled = BR_USER_STP; 143 br->stp_enabled = BR_USER_STP;
142 br_debug(br, "userspace STP started\n"); 144 br_debug(br, "userspace STP started\n");
145 /* Stop hello and hold timers */
146 del_timer(&br->hello_timer);
147 list_for_each_entry(p, &br->port_list, list)
148 del_timer(&p->hold_timer);
143 } else { 149 } else {
144 br->stp_enabled = BR_KERNEL_STP; 150 br->stp_enabled = BR_KERNEL_STP;
145 br_debug(br, "using kernel STP\n"); 151 br_debug(br, "using kernel STP\n");
@@ -156,12 +162,17 @@ static void br_stp_stop(struct net_bridge *br)
156 int r; 162 int r;
157 char *argv[] = { BR_STP_PROG, br->dev->name, "stop", NULL }; 163 char *argv[] = { BR_STP_PROG, br->dev->name, "stop", NULL };
158 char *envp[] = { NULL }; 164 char *envp[] = { NULL };
165 struct net_bridge_port *p;
159 166
160 if (br->stp_enabled == BR_USER_STP) { 167 if (br->stp_enabled == BR_USER_STP) {
161 r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC); 168 r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
162 br_info(br, "userspace STP stopped, return code %d\n", r); 169 br_info(br, "userspace STP stopped, return code %d\n", r);
163 170
164 /* To start timers on any ports left in blocking */ 171 /* To start timers on any ports left in blocking */
172 mod_timer(&br->hello_timer, jiffies + br->hello_time);
173 list_for_each_entry(p, &br->port_list, list)
174 mod_timer(&p->hold_timer,
175 round_jiffies(jiffies + BR_HOLD_TIME));
165 spin_lock_bh(&br->lock); 176 spin_lock_bh(&br->lock);
166 br_port_state_selection(br); 177 br_port_state_selection(br);
167 spin_unlock_bh(&br->lock); 178 spin_unlock_bh(&br->lock);
diff --git a/net/bridge/br_stp_timer.c b/net/bridge/br_stp_timer.c
index 7caf7fae2d5b..5f0f5af0ec35 100644
--- a/net/bridge/br_stp_timer.c
+++ b/net/bridge/br_stp_timer.c
@@ -40,7 +40,9 @@ static void br_hello_timer_expired(unsigned long arg)
40 if (br->dev->flags & IFF_UP) { 40 if (br->dev->flags & IFF_UP) {
41 br_config_bpdu_generation(br); 41 br_config_bpdu_generation(br);
42 42
43 mod_timer(&br->hello_timer, round_jiffies(jiffies + br->hello_time)); 43 if (br->stp_enabled != BR_USER_STP)
44 mod_timer(&br->hello_timer,
45 round_jiffies(jiffies + br->hello_time));
44 } 46 }
45 spin_unlock(&br->lock); 47 spin_unlock(&br->lock);
46} 48}
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 3cc71b9f5517..cc858919108e 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -121,12 +121,13 @@ static void caif_flow_ctrl(struct sock *sk, int mode)
121 * Copied from sock.c:sock_queue_rcv_skb(), but changed so packets are 121 * Copied from sock.c:sock_queue_rcv_skb(), but changed so packets are
122 * not dropped, but CAIF is sending flow off instead. 122 * not dropped, but CAIF is sending flow off instead.
123 */ 123 */
124static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) 124static void caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
125{ 125{
126 int err; 126 int err;
127 unsigned long flags; 127 unsigned long flags;
128 struct sk_buff_head *list = &sk->sk_receive_queue; 128 struct sk_buff_head *list = &sk->sk_receive_queue;
129 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); 129 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
130 bool queued = false;
130 131
131 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= 132 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
132 (unsigned int)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) { 133 (unsigned int)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) {
@@ -139,7 +140,8 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
139 140
140 err = sk_filter(sk, skb); 141 err = sk_filter(sk, skb);
141 if (err) 142 if (err)
142 return err; 143 goto out;
144
143 if (!sk_rmem_schedule(sk, skb, skb->truesize) && rx_flow_is_on(cf_sk)) { 145 if (!sk_rmem_schedule(sk, skb, skb->truesize) && rx_flow_is_on(cf_sk)) {
144 set_rx_flow_off(cf_sk); 146 set_rx_flow_off(cf_sk);
145 net_dbg_ratelimited("sending flow OFF due to rmem_schedule\n"); 147 net_dbg_ratelimited("sending flow OFF due to rmem_schedule\n");
@@ -147,21 +149,16 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
147 } 149 }
148 skb->dev = NULL; 150 skb->dev = NULL;
149 skb_set_owner_r(skb, sk); 151 skb_set_owner_r(skb, sk);
150 /* Cache the SKB length before we tack it onto the receive
151 * queue. Once it is added it no longer belongs to us and
152 * may be freed by other threads of control pulling packets
153 * from the queue.
154 */
155 spin_lock_irqsave(&list->lock, flags); 152 spin_lock_irqsave(&list->lock, flags);
156 if (!sock_flag(sk, SOCK_DEAD)) 153 queued = !sock_flag(sk, SOCK_DEAD);
154 if (queued)
157 __skb_queue_tail(list, skb); 155 __skb_queue_tail(list, skb);
158 spin_unlock_irqrestore(&list->lock, flags); 156 spin_unlock_irqrestore(&list->lock, flags);
159 157out:
160 if (!sock_flag(sk, SOCK_DEAD)) 158 if (queued)
161 sk->sk_data_ready(sk); 159 sk->sk_data_ready(sk);
162 else 160 else
163 kfree_skb(skb); 161 kfree_skb(skb);
164 return 0;
165} 162}
166 163
167/* Packet Receive Callback function called from CAIF Stack */ 164/* Packet Receive Callback function called from CAIF Stack */
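
The caif_socket.c change makes the queue decision once, under the list lock, and remembers it in a queued flag so the post-unlock code no longer re-reads SOCK_DEAD (which could have changed in between). A minimal pthread sketch of that "decide under the lock, act on the saved answer afterwards" pattern; all names below are invented and the mutex merely stands in for the skb list lock.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct rxq {
    pthread_mutex_t lock;
    bool dead;          /* stands in for SOCK_DEAD */
    int queued_pkts;
};

/* Queue a packet unless the endpoint is already dead.  The decision is
 * taken once, under the lock, and reused after unlock. */
static void deliver(struct rxq *q)
{
    bool queued;

    pthread_mutex_lock(&q->lock);
    queued = !q->dead;
    if (queued)
        q->queued_pkts++;
    pthread_mutex_unlock(&q->lock);

    if (queued)
        printf("notified reader, %d packet(s) queued\n", q->queued_pkts);
    else
        printf("endpoint dead, packet dropped\n");
}

int main(void)
{
    struct rxq q = { PTHREAD_MUTEX_INITIALIZER, false, 0 };

    deliver(&q);
    q.dead = true;
    deliver(&q);
    return 0;
}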
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 7933e62a7318..166d436196c1 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -89,6 +89,8 @@ struct timer_list can_stattimer; /* timer for statistics update */
89struct s_stats can_stats; /* packet statistics */ 89struct s_stats can_stats; /* packet statistics */
90struct s_pstats can_pstats; /* receive list statistics */ 90struct s_pstats can_pstats; /* receive list statistics */
91 91
92static atomic_t skbcounter = ATOMIC_INIT(0);
93
92/* 94/*
93 * af_can socket functions 95 * af_can socket functions
94 */ 96 */
@@ -310,12 +312,8 @@ int can_send(struct sk_buff *skb, int loop)
310 return err; 312 return err;
311 } 313 }
312 314
313 if (newskb) { 315 if (newskb)
314 if (!(newskb->tstamp.tv64))
315 __net_timestamp(newskb);
316
317 netif_rx_ni(newskb); 316 netif_rx_ni(newskb);
318 }
319 317
320 /* update statistics */ 318 /* update statistics */
321 can_stats.tx_frames++; 319 can_stats.tx_frames++;
@@ -683,6 +681,10 @@ static void can_receive(struct sk_buff *skb, struct net_device *dev)
683 can_stats.rx_frames++; 681 can_stats.rx_frames++;
684 can_stats.rx_frames_delta++; 682 can_stats.rx_frames_delta++;
685 683
684 /* create non-zero unique skb identifier together with *skb */
685 while (!(can_skb_prv(skb)->skbcnt))
686 can_skb_prv(skb)->skbcnt = atomic_inc_return(&skbcounter);
687
686 rcu_read_lock(); 688 rcu_read_lock();
687 689
688 /* deliver the packet to sockets listening on all devices */ 690 /* deliver the packet to sockets listening on all devices */
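
af_can.c now stamps every received frame with a non-zero per-frame counter, which the bcm.c and raw.c hunks below initialize and compare instead of timestamps. The while (!...) loop matters: it skips the value 0 when the 32-bit counter wraps, so 0 can safely mean "not yet assigned". A standalone C11 sketch of that guarantee; the C11 atomic is only an analogue of the kernel's atomic_t.

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint skbcounter;

/* Returns a unique, never-zero identifier; 0 is skipped on wrap. */
static unsigned int next_id(void)
{
    unsigned int id = 0;

    while (!id)
        id = atomic_fetch_add(&skbcounter, 1) + 1;
    return id;
}

int main(void)
{
    /* Start near the wrap point to show 0 being skipped. */
    atomic_store(&skbcounter, 0xfffffffeu);
    for (int i = 0; i < 4; i++)
        printf("id = %u\n", next_id());
    return 0;
}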
diff --git a/net/can/bcm.c b/net/can/bcm.c
index b523453585be..a1ba6875c2a2 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -261,6 +261,7 @@ static void bcm_can_tx(struct bcm_op *op)
261 261
262 can_skb_reserve(skb); 262 can_skb_reserve(skb);
263 can_skb_prv(skb)->ifindex = dev->ifindex; 263 can_skb_prv(skb)->ifindex = dev->ifindex;
264 can_skb_prv(skb)->skbcnt = 0;
264 265
265 memcpy(skb_put(skb, CFSIZ), cf, CFSIZ); 266 memcpy(skb_put(skb, CFSIZ), cf, CFSIZ);
266 267
@@ -1217,6 +1218,7 @@ static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk)
1217 } 1218 }
1218 1219
1219 can_skb_prv(skb)->ifindex = dev->ifindex; 1220 can_skb_prv(skb)->ifindex = dev->ifindex;
1221 can_skb_prv(skb)->skbcnt = 0;
1220 skb->dev = dev; 1222 skb->dev = dev;
1221 can_skb_set_owner(skb, sk); 1223 can_skb_set_owner(skb, sk);
1222 err = can_send(skb, 1); /* send with loopback */ 1224 err = can_send(skb, 1); /* send with loopback */
diff --git a/net/can/raw.c b/net/can/raw.c
index 31b9748cbb4e..2e67b1423cd3 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -75,7 +75,7 @@ MODULE_ALIAS("can-proto-1");
75 */ 75 */
76 76
77struct uniqframe { 77struct uniqframe {
78 ktime_t tstamp; 78 int skbcnt;
79 const struct sk_buff *skb; 79 const struct sk_buff *skb;
80 unsigned int join_rx_count; 80 unsigned int join_rx_count;
81}; 81};
@@ -133,7 +133,7 @@ static void raw_rcv(struct sk_buff *oskb, void *data)
133 133
134 /* eliminate multiple filter matches for the same skb */ 134 /* eliminate multiple filter matches for the same skb */
135 if (this_cpu_ptr(ro->uniq)->skb == oskb && 135 if (this_cpu_ptr(ro->uniq)->skb == oskb &&
136 ktime_equal(this_cpu_ptr(ro->uniq)->tstamp, oskb->tstamp)) { 136 this_cpu_ptr(ro->uniq)->skbcnt == can_skb_prv(oskb)->skbcnt) {
137 if (ro->join_filters) { 137 if (ro->join_filters) {
138 this_cpu_inc(ro->uniq->join_rx_count); 138 this_cpu_inc(ro->uniq->join_rx_count);
139 /* drop frame until all enabled filters matched */ 139 /* drop frame until all enabled filters matched */
@@ -144,7 +144,7 @@ static void raw_rcv(struct sk_buff *oskb, void *data)
144 } 144 }
145 } else { 145 } else {
146 this_cpu_ptr(ro->uniq)->skb = oskb; 146 this_cpu_ptr(ro->uniq)->skb = oskb;
147 this_cpu_ptr(ro->uniq)->tstamp = oskb->tstamp; 147 this_cpu_ptr(ro->uniq)->skbcnt = can_skb_prv(oskb)->skbcnt;
148 this_cpu_ptr(ro->uniq)->join_rx_count = 1; 148 this_cpu_ptr(ro->uniq)->join_rx_count = 1;
149 /* drop first frame to check all enabled filters? */ 149 /* drop first frame to check all enabled filters? */
150 if (ro->join_filters && ro->count > 1) 150 if (ro->join_filters && ro->count > 1)
@@ -749,6 +749,7 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
749 749
750 can_skb_reserve(skb); 750 can_skb_reserve(skb);
751 can_skb_prv(skb)->ifindex = dev->ifindex; 751 can_skb_prv(skb)->ifindex = dev->ifindex;
752 can_skb_prv(skb)->skbcnt = 0;
752 753
753 err = memcpy_from_msg(skb_put(skb, size), msg, size); 754 err = memcpy_from_msg(skb_put(skb, size), msg, size);
754 if (err < 0) 755 if (err < 0)
diff --git a/net/core/datagram.c b/net/core/datagram.c
index b80fb91bb3f7..617088aee21d 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -131,6 +131,35 @@ out_noerr:
131 goto out; 131 goto out;
132} 132}
133 133
134static struct sk_buff *skb_set_peeked(struct sk_buff *skb)
135{
136 struct sk_buff *nskb;
137
138 if (skb->peeked)
139 return skb;
140
141 /* We have to unshare an skb before modifying it. */
142 if (!skb_shared(skb))
143 goto done;
144
145 nskb = skb_clone(skb, GFP_ATOMIC);
146 if (!nskb)
147 return ERR_PTR(-ENOMEM);
148
149 skb->prev->next = nskb;
150 skb->next->prev = nskb;
151 nskb->prev = skb->prev;
152 nskb->next = skb->next;
153
154 consume_skb(skb);
155 skb = nskb;
156
157done:
158 skb->peeked = 1;
159
160 return skb;
161}
162
134/** 163/**
135 * __skb_recv_datagram - Receive a datagram skbuff 164 * __skb_recv_datagram - Receive a datagram skbuff
136 * @sk: socket 165 * @sk: socket
@@ -165,7 +194,9 @@ out_noerr:
165struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags, 194struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
166 int *peeked, int *off, int *err) 195 int *peeked, int *off, int *err)
167{ 196{
197 struct sk_buff_head *queue = &sk->sk_receive_queue;
168 struct sk_buff *skb, *last; 198 struct sk_buff *skb, *last;
199 unsigned long cpu_flags;
169 long timeo; 200 long timeo;
170 /* 201 /*
171 * Caller is allowed not to check sk->sk_err before skb_recv_datagram() 202 * Caller is allowed not to check sk->sk_err before skb_recv_datagram()
@@ -184,8 +215,6 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
184 * Look at current nfs client by the way... 215 * Look at current nfs client by the way...
185 * However, this function was correct in any case. 8) 216 * However, this function was correct in any case. 8)
186 */ 217 */
187 unsigned long cpu_flags;
188 struct sk_buff_head *queue = &sk->sk_receive_queue;
189 int _off = *off; 218 int _off = *off;
190 219
191 last = (struct sk_buff *)queue; 220 last = (struct sk_buff *)queue;
@@ -199,7 +228,12 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
199 _off -= skb->len; 228 _off -= skb->len;
200 continue; 229 continue;
201 } 230 }
202 skb->peeked = 1; 231
232 skb = skb_set_peeked(skb);
233 error = PTR_ERR(skb);
234 if (IS_ERR(skb))
235 goto unlock_err;
236
203 atomic_inc(&skb->users); 237 atomic_inc(&skb->users);
204 } else 238 } else
205 __skb_unlink(skb, queue); 239 __skb_unlink(skb, queue);
@@ -223,6 +257,8 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
223 257
224 return NULL; 258 return NULL;
225 259
260unlock_err:
261 spin_unlock_irqrestore(&queue->lock, cpu_flags);
226no_packet: 262no_packet:
227 *err = error; 263 *err = error;
228 return NULL; 264 return NULL;
@@ -622,7 +658,8 @@ __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
622 !skb->csum_complete_sw) 658 !skb->csum_complete_sw)
623 netdev_rx_csum_fault(skb->dev); 659 netdev_rx_csum_fault(skb->dev);
624 } 660 }
625 skb->csum_valid = !sum; 661 if (!skb_shared(skb))
662 skb->csum_valid = !sum;
626 return sum; 663 return sum;
627} 664}
628EXPORT_SYMBOL(__skb_checksum_complete_head); 665EXPORT_SYMBOL(__skb_checksum_complete_head);
@@ -642,11 +679,13 @@ __sum16 __skb_checksum_complete(struct sk_buff *skb)
642 netdev_rx_csum_fault(skb->dev); 679 netdev_rx_csum_fault(skb->dev);
643 } 680 }
644 681
645 /* Save full packet checksum */ 682 if (!skb_shared(skb)) {
646 skb->csum = csum; 683 /* Save full packet checksum */
647 skb->ip_summed = CHECKSUM_COMPLETE; 684 skb->csum = csum;
648 skb->csum_complete_sw = 1; 685 skb->ip_summed = CHECKSUM_COMPLETE;
649 skb->csum_valid = !sum; 686 skb->csum_complete_sw = 1;
687 skb->csum_valid = !sum;
688 }
650 689
651 return sum; 690 return sum;
652} 691}
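
skb_set_peeked() clones a shared buffer and splices the clone into the receive queue in place of the original before marking it peeked, so the flag is never set on data another thread may still hold. A small doubly-linked-list sketch of that "replace the node with a private copy" splice; plain malloc/refcounts stand in for skb cloning and skb_shared(), and every name here is made up.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct node {
    struct node *prev, *next;
    int refs;            /* >1 means "shared", like skb_shared() */
    int peeked;
    char payload[16];
};

/* Replace a shared node with a private copy, keeping queue order. */
static struct node *set_peeked(struct node *n)
{
    struct node *copy;

    if (n->refs == 1) {  /* already exclusive, just mark it */
        n->peeked = 1;
        return n;
    }
    copy = malloc(sizeof(*copy));
    if (!copy)
        return NULL;
    memcpy(copy, n, sizeof(*copy));
    copy->refs = 1;

    n->prev->next = copy;    /* splice the copy into the list */
    n->next->prev = copy;
    n->refs--;               /* drop our reference on the original */

    copy->peeked = 1;
    return copy;
}

int main(void)
{
    struct node head, a;

    head.prev = head.next = &head;      /* empty circular queue */
    a = (struct node){ &head, &head, 2, 0, "hello" };
    head.next = head.prev = &a;

    struct node *p = set_peeked(&a);
    printf("peeked copy '%s', original still unpeeked: %d\n",
           p->payload, a.peeked);
    free(p);
    return 0;
}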
diff --git a/net/core/dev.c b/net/core/dev.c
index 6778a9999d52..a8e4dd430285 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -677,10 +677,6 @@ int dev_get_iflink(const struct net_device *dev)
677 if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink) 677 if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
678 return dev->netdev_ops->ndo_get_iflink(dev); 678 return dev->netdev_ops->ndo_get_iflink(dev);
679 679
680 /* If dev->rtnl_link_ops is set, it's a virtual interface. */
681 if (dev->rtnl_link_ops)
682 return 0;
683
684 return dev->ifindex; 680 return dev->ifindex;
685} 681}
686EXPORT_SYMBOL(dev_get_iflink); 682EXPORT_SYMBOL(dev_get_iflink);
@@ -3452,6 +3448,8 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
3452 local_irq_save(flags); 3448 local_irq_save(flags);
3453 3449
3454 rps_lock(sd); 3450 rps_lock(sd);
3451 if (!netif_running(skb->dev))
3452 goto drop;
3455 qlen = skb_queue_len(&sd->input_pkt_queue); 3453 qlen = skb_queue_len(&sd->input_pkt_queue);
3456 if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) { 3454 if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
3457 if (qlen) { 3455 if (qlen) {
@@ -3473,6 +3471,7 @@ enqueue:
3473 goto enqueue; 3471 goto enqueue;
3474 } 3472 }
3475 3473
3474drop:
3476 sd->dropped++; 3475 sd->dropped++;
3477 rps_unlock(sd); 3476 rps_unlock(sd);
3478 3477
@@ -3775,8 +3774,6 @@ static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
3775 3774
3776 pt_prev = NULL; 3775 pt_prev = NULL;
3777 3776
3778 rcu_read_lock();
3779
3780another_round: 3777another_round:
3781 skb->skb_iif = skb->dev->ifindex; 3778 skb->skb_iif = skb->dev->ifindex;
3782 3779
@@ -3786,7 +3783,7 @@ another_round:
3786 skb->protocol == cpu_to_be16(ETH_P_8021AD)) { 3783 skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
3787 skb = skb_vlan_untag(skb); 3784 skb = skb_vlan_untag(skb);
3788 if (unlikely(!skb)) 3785 if (unlikely(!skb))
3789 goto unlock; 3786 goto out;
3790 } 3787 }
3791 3788
3792#ifdef CONFIG_NET_CLS_ACT 3789#ifdef CONFIG_NET_CLS_ACT
@@ -3816,10 +3813,10 @@ skip_taps:
3816 if (static_key_false(&ingress_needed)) { 3813 if (static_key_false(&ingress_needed)) {
3817 skb = handle_ing(skb, &pt_prev, &ret, orig_dev); 3814 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
3818 if (!skb) 3815 if (!skb)
3819 goto unlock; 3816 goto out;
3820 3817
3821 if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0) 3818 if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
3822 goto unlock; 3819 goto out;
3823 } 3820 }
3824#endif 3821#endif
3825#ifdef CONFIG_NET_CLS_ACT 3822#ifdef CONFIG_NET_CLS_ACT
@@ -3837,7 +3834,7 @@ ncls:
3837 if (vlan_do_receive(&skb)) 3834 if (vlan_do_receive(&skb))
3838 goto another_round; 3835 goto another_round;
3839 else if (unlikely(!skb)) 3836 else if (unlikely(!skb))
3840 goto unlock; 3837 goto out;
3841 } 3838 }
3842 3839
3843 rx_handler = rcu_dereference(skb->dev->rx_handler); 3840 rx_handler = rcu_dereference(skb->dev->rx_handler);
@@ -3849,7 +3846,7 @@ ncls:
3849 switch (rx_handler(&skb)) { 3846 switch (rx_handler(&skb)) {
3850 case RX_HANDLER_CONSUMED: 3847 case RX_HANDLER_CONSUMED:
3851 ret = NET_RX_SUCCESS; 3848 ret = NET_RX_SUCCESS;
3852 goto unlock; 3849 goto out;
3853 case RX_HANDLER_ANOTHER: 3850 case RX_HANDLER_ANOTHER:
3854 goto another_round; 3851 goto another_round;
3855 case RX_HANDLER_EXACT: 3852 case RX_HANDLER_EXACT:
@@ -3903,8 +3900,7 @@ drop:
3903 ret = NET_RX_DROP; 3900 ret = NET_RX_DROP;
3904 } 3901 }
3905 3902
3906unlock: 3903out:
3907 rcu_read_unlock();
3908 return ret; 3904 return ret;
3909} 3905}
3910 3906
@@ -3935,29 +3931,30 @@ static int __netif_receive_skb(struct sk_buff *skb)
3935 3931
3936static int netif_receive_skb_internal(struct sk_buff *skb) 3932static int netif_receive_skb_internal(struct sk_buff *skb)
3937{ 3933{
3934 int ret;
3935
3938 net_timestamp_check(netdev_tstamp_prequeue, skb); 3936 net_timestamp_check(netdev_tstamp_prequeue, skb);
3939 3937
3940 if (skb_defer_rx_timestamp(skb)) 3938 if (skb_defer_rx_timestamp(skb))
3941 return NET_RX_SUCCESS; 3939 return NET_RX_SUCCESS;
3942 3940
3941 rcu_read_lock();
3942
3943#ifdef CONFIG_RPS 3943#ifdef CONFIG_RPS
3944 if (static_key_false(&rps_needed)) { 3944 if (static_key_false(&rps_needed)) {
3945 struct rps_dev_flow voidflow, *rflow = &voidflow; 3945 struct rps_dev_flow voidflow, *rflow = &voidflow;
3946 int cpu, ret; 3946 int cpu = get_rps_cpu(skb->dev, skb, &rflow);
3947
3948 rcu_read_lock();
3949
3950 cpu = get_rps_cpu(skb->dev, skb, &rflow);
3951 3947
3952 if (cpu >= 0) { 3948 if (cpu >= 0) {
3953 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); 3949 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3954 rcu_read_unlock(); 3950 rcu_read_unlock();
3955 return ret; 3951 return ret;
3956 } 3952 }
3957 rcu_read_unlock();
3958 } 3953 }
3959#endif 3954#endif
3960 return __netif_receive_skb(skb); 3955 ret = __netif_receive_skb(skb);
3956 rcu_read_unlock();
3957 return ret;
3961} 3958}
3962 3959
3963/** 3960/**
@@ -4502,8 +4499,10 @@ static int process_backlog(struct napi_struct *napi, int quota)
4502 struct sk_buff *skb; 4499 struct sk_buff *skb;
4503 4500
4504 while ((skb = __skb_dequeue(&sd->process_queue))) { 4501 while ((skb = __skb_dequeue(&sd->process_queue))) {
4502 rcu_read_lock();
4505 local_irq_enable(); 4503 local_irq_enable();
4506 __netif_receive_skb(skb); 4504 __netif_receive_skb(skb);
4505 rcu_read_unlock();
4507 local_irq_disable(); 4506 local_irq_disable();
4508 input_queue_head_incr(sd); 4507 input_queue_head_incr(sd);
4509 if (++work >= quota) { 4508 if (++work >= quota) {
@@ -6139,6 +6138,7 @@ static void rollback_registered_many(struct list_head *head)
6139 unlist_netdevice(dev); 6138 unlist_netdevice(dev);
6140 6139
6141 dev->reg_state = NETREG_UNREGISTERING; 6140 dev->reg_state = NETREG_UNREGISTERING;
6141 on_each_cpu(flush_backlog, dev, 1);
6142 } 6142 }
6143 6143
6144 synchronize_net(); 6144 synchronize_net();
@@ -6409,7 +6409,8 @@ static int netif_alloc_netdev_queues(struct net_device *dev)
6409 struct netdev_queue *tx; 6409 struct netdev_queue *tx;
6410 size_t sz = count * sizeof(*tx); 6410 size_t sz = count * sizeof(*tx);
6411 6411
6412 BUG_ON(count < 1 || count > 0xffff); 6412 if (count < 1 || count > 0xffff)
6413 return -EINVAL;
6413 6414
6414 tx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT); 6415 tx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
6415 if (!tx) { 6416 if (!tx) {
@@ -6773,8 +6774,6 @@ void netdev_run_todo(void)
6773 6774
6774 dev->reg_state = NETREG_UNREGISTERED; 6775 dev->reg_state = NETREG_UNREGISTERED;
6775 6776
6776 on_each_cpu(flush_backlog, dev, 1);
6777
6778 netdev_wait_allrefs(dev); 6777 netdev_wait_allrefs(dev);
6779 6778
6780 /* paranoia */ 6779 /* paranoia */
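
The net/core/dev.c hunks lift the RCU read-side critical section out of __netif_receive_skb_core() and into its callers, so the whole receive path (including the new netif_running() check in enqueue_to_backlog() and the earlier flush_backlog on unregister) runs under one read-side section. A hedged pthread-rwlock sketch of "move the read lock into the caller"; the rwlock is only an analogy for RCU, and the names are invented.

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t cfg_lock = PTHREAD_RWLOCK_INITIALIZER;
static int handler_installed = 1;

/* Core worker: assumes the caller already holds the read lock,
 * mirroring __netif_receive_skb_core() after the patch. */
static int receive_core(int pkt)
{
    if (!handler_installed)
        return -1;
    printf("pkt %d handled\n", pkt);
    return 0;
}

/* Caller owns the whole read-side critical section. */
static int receive(int pkt)
{
    int ret;

    pthread_rwlock_rdlock(&cfg_lock);
    ret = receive_core(pkt);
    pthread_rwlock_unlock(&cfg_lock);
    return ret;
}

int main(void)
{
    receive(1);
    pthread_rwlock_wrlock(&cfg_lock);   /* like unregistering a device */
    handler_installed = 0;
    pthread_rwlock_unlock(&cfg_lock);
    receive(2);                          /* now rejected */
    return 0;
}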
diff --git a/net/core/dst.c b/net/core/dst.c
index e956ce6d1378..002144bea935 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -284,7 +284,9 @@ void dst_release(struct dst_entry *dst)
284 int newrefcnt; 284 int newrefcnt;
285 285
286 newrefcnt = atomic_dec_return(&dst->__refcnt); 286 newrefcnt = atomic_dec_return(&dst->__refcnt);
287 WARN_ON(newrefcnt < 0); 287 if (unlikely(newrefcnt < 0))
288 net_warn_ratelimited("%s: dst:%p refcnt:%d\n",
289 __func__, dst, newrefcnt);
288 if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt) 290 if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt)
289 call_rcu(&dst->rcu_head, dst_destroy_rcu); 291 call_rcu(&dst->rcu_head, dst_destroy_rcu);
290 } 292 }
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 9dfb88a933e7..92d886f4adcb 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -66,7 +66,7 @@
66 66
67 NOTES. 67 NOTES.
68 68
69 * avbps is scaled by 2^5, avpps is scaled by 2^10. 69 * avbps and avpps are scaled by 2^5.
70 * both values are reported as 32 bit unsigned values. bps can 70 * both values are reported as 32 bit unsigned values. bps can
71 overflow for fast links : max speed being 34360Mbit/sec 71 overflow for fast links : max speed being 34360Mbit/sec
72 * Minimal interval is HZ/4=250msec (it is the greatest common divisor 72 * Minimal interval is HZ/4=250msec (it is the greatest common divisor
@@ -85,10 +85,10 @@ struct gen_estimator
85 struct gnet_stats_rate_est64 *rate_est; 85 struct gnet_stats_rate_est64 *rate_est;
86 spinlock_t *stats_lock; 86 spinlock_t *stats_lock;
87 int ewma_log; 87 int ewma_log;
88 u32 last_packets;
89 unsigned long avpps;
88 u64 last_bytes; 90 u64 last_bytes;
89 u64 avbps; 91 u64 avbps;
90 u32 last_packets;
91 u32 avpps;
92 struct rcu_head e_rcu; 92 struct rcu_head e_rcu;
93 struct rb_node node; 93 struct rb_node node;
94 struct gnet_stats_basic_cpu __percpu *cpu_bstats; 94 struct gnet_stats_basic_cpu __percpu *cpu_bstats;
@@ -118,8 +118,8 @@ static void est_timer(unsigned long arg)
118 rcu_read_lock(); 118 rcu_read_lock();
119 list_for_each_entry_rcu(e, &elist[idx].list, list) { 119 list_for_each_entry_rcu(e, &elist[idx].list, list) {
120 struct gnet_stats_basic_packed b = {0}; 120 struct gnet_stats_basic_packed b = {0};
121 unsigned long rate;
121 u64 brate; 122 u64 brate;
122 u32 rate;
123 123
124 spin_lock(e->stats_lock); 124 spin_lock(e->stats_lock);
125 read_lock(&est_lock); 125 read_lock(&est_lock);
@@ -133,10 +133,11 @@ static void est_timer(unsigned long arg)
133 e->avbps += (brate >> e->ewma_log) - (e->avbps >> e->ewma_log); 133 e->avbps += (brate >> e->ewma_log) - (e->avbps >> e->ewma_log);
134 e->rate_est->bps = (e->avbps+0xF)>>5; 134 e->rate_est->bps = (e->avbps+0xF)>>5;
135 135
136 rate = (b.packets - e->last_packets)<<(12 - idx); 136 rate = b.packets - e->last_packets;
137 rate <<= (7 - idx);
137 e->last_packets = b.packets; 138 e->last_packets = b.packets;
138 e->avpps += (rate >> e->ewma_log) - (e->avpps >> e->ewma_log); 139 e->avpps += (rate >> e->ewma_log) - (e->avpps >> e->ewma_log);
139 e->rate_est->pps = (e->avpps+0x1FF)>>10; 140 e->rate_est->pps = (e->avpps + 0xF) >> 5;
140skip: 141skip:
141 read_unlock(&est_lock); 142 read_unlock(&est_lock);
142 spin_unlock(e->stats_lock); 143 spin_unlock(e->stats_lock);
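
The gen_estimator change widens avpps to unsigned long and rescales it: the packet delta is shifted by (7 - idx) instead of (12 - idx), so both avbps and avpps now use 2^5 fixed-point scaling and are reported with (x + 0xF) >> 5 rounding. A worked fixed-point EWMA sketch with the same constants; ewma_log and the timer index here are made-up example inputs.

#include <stdio.h>

int main(void)
{
    unsigned long avpps = 0;      /* 2^5 fixed-point packets/interval */
    int ewma_log = 3;             /* example smoothing factor */
    int idx = 2;                  /* example timer interval index */
    unsigned long last_packets = 0, packets = 0;

    for (int tick = 1; tick <= 5; tick++) {
        packets += 1000;          /* pretend 1000 pkts per interval */

        /* rate in 2^5 fixed point, as in the patched est_timer() */
        unsigned long rate = (packets - last_packets) << (7 - idx);
        last_packets = packets;

        /* EWMA update, identical in shape to the kernel code */
        avpps += (rate >> ewma_log) - (avpps >> ewma_log);

        /* reported value: round up and drop the 2^5 scaling */
        printf("tick %d: pps estimate = %lu\n", tick, (avpps + 0xF) >> 5);
    }
    return 0;
}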
diff --git a/net/core/netclassid_cgroup.c b/net/core/netclassid_cgroup.c
index 1f2a126f4ffa..6441f47b1a8f 100644
--- a/net/core/netclassid_cgroup.c
+++ b/net/core/netclassid_cgroup.c
@@ -23,7 +23,8 @@ static inline struct cgroup_cls_state *css_cls_state(struct cgroup_subsys_state
23 23
24struct cgroup_cls_state *task_cls_state(struct task_struct *p) 24struct cgroup_cls_state *task_cls_state(struct task_struct *p)
25{ 25{
26 return css_cls_state(task_css(p, net_cls_cgrp_id)); 26 return css_cls_state(task_css_check(p, net_cls_cgrp_id,
27 rcu_read_lock_bh_held()));
27} 28}
28EXPORT_SYMBOL_GPL(task_cls_state); 29EXPORT_SYMBOL_GPL(task_cls_state);
29 30
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 05badbb58865..1cbd209192ea 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3514,8 +3514,6 @@ static int pktgen_thread_worker(void *arg)
3514 3514
3515 set_freezable(); 3515 set_freezable();
3516 3516
3517 __set_current_state(TASK_RUNNING);
3518
3519 while (!kthread_should_stop()) { 3517 while (!kthread_should_stop()) {
3520 pkt_dev = next_to_run(t); 3518 pkt_dev = next_to_run(t);
3521 3519
@@ -3560,7 +3558,6 @@ static int pktgen_thread_worker(void *arg)
3560 3558
3561 try_to_freeze(); 3559 try_to_freeze();
3562 } 3560 }
3563 set_current_state(TASK_INTERRUPTIBLE);
3564 3561
3565 pr_debug("%s stopping all device\n", t->tsk->comm); 3562 pr_debug("%s stopping all device\n", t->tsk->comm);
3566 pktgen_stop(t); 3563 pktgen_stop(t);
@@ -3571,13 +3568,6 @@ static int pktgen_thread_worker(void *arg)
3571 pr_debug("%s removing thread\n", t->tsk->comm); 3568 pr_debug("%s removing thread\n", t->tsk->comm);
3572 pktgen_rem_thread(t); 3569 pktgen_rem_thread(t);
3573 3570
3574 /* Wait for kthread_stop */
3575 while (!kthread_should_stop()) {
3576 set_current_state(TASK_INTERRUPTIBLE);
3577 schedule();
3578 }
3579 __set_current_state(TASK_RUNNING);
3580
3581 return 0; 3571 return 0;
3582} 3572}
3583 3573
@@ -3769,6 +3759,7 @@ static int __net_init pktgen_create_thread(int cpu, struct pktgen_net *pn)
3769 } 3759 }
3770 3760
3771 t->net = pn; 3761 t->net = pn;
3762 get_task_struct(p);
3772 wake_up_process(p); 3763 wake_up_process(p);
3773 wait_for_completion(&t->start_done); 3764 wait_for_completion(&t->start_done);
3774 3765
@@ -3891,6 +3882,7 @@ static void __net_exit pg_net_exit(struct net *net)
3891 t = list_entry(q, struct pktgen_thread, th_list); 3882 t = list_entry(q, struct pktgen_thread, th_list);
3892 list_del(&t->th_list); 3883 list_del(&t->th_list);
3893 kthread_stop(t->tsk); 3884 kthread_stop(t->tsk);
3885 put_task_struct(t->tsk);
3894 kfree(t); 3886 kfree(t);
3895 } 3887 }
3896 3888
diff --git a/net/core/request_sock.c b/net/core/request_sock.c
index 87b22c0bc08c..b42f0e26f89e 100644
--- a/net/core/request_sock.c
+++ b/net/core/request_sock.c
@@ -103,10 +103,16 @@ void reqsk_queue_destroy(struct request_sock_queue *queue)
103 spin_lock_bh(&queue->syn_wait_lock); 103 spin_lock_bh(&queue->syn_wait_lock);
104 while ((req = lopt->syn_table[i]) != NULL) { 104 while ((req = lopt->syn_table[i]) != NULL) {
105 lopt->syn_table[i] = req->dl_next; 105 lopt->syn_table[i] = req->dl_next;
106 /* Because of following del_timer_sync(),
107 * we must release the spinlock here
108 * or risk a dead lock.
109 */
110 spin_unlock_bh(&queue->syn_wait_lock);
106 atomic_inc(&lopt->qlen_dec); 111 atomic_inc(&lopt->qlen_dec);
107 if (del_timer(&req->rsk_timer)) 112 if (del_timer_sync(&req->rsk_timer))
108 reqsk_put(req); 113 reqsk_put(req);
109 reqsk_put(req); 114 reqsk_put(req);
115 spin_lock_bh(&queue->syn_wait_lock);
110 } 116 }
111 spin_unlock_bh(&queue->syn_wait_lock); 117 spin_unlock_bh(&queue->syn_wait_lock);
112 } 118 }
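
reqsk_queue_destroy() now drops syn_wait_lock around del_timer_sync(), because the timer handler itself takes that lock; waiting for the handler to finish while still holding the lock would deadlock. A small pthread sketch of why a lock must be released before synchronously waiting for code that needs it; the "timer" is just a thread here, and all names are invented.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stands in for the request-socket timer handler: it grabs table_lock. */
static void *timer_fn(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&table_lock);
    puts("timer: touched the table");
    pthread_mutex_unlock(&table_lock);
    return NULL;
}

int main(void)
{
    pthread_t timer;

    pthread_mutex_lock(&table_lock);
    pthread_create(&timer, NULL, timer_fn, NULL);
    sleep(1);                 /* let the timer block on table_lock */

    /* Joining here while still holding table_lock would deadlock,
     * exactly like del_timer_sync() under syn_wait_lock.  Release
     * first, wait, then retake the lock - as the patch does. */
    pthread_mutex_unlock(&table_lock);
    pthread_join(timer, NULL);
    pthread_mutex_lock(&table_lock);
    puts("main: table lock retaken after the timer finished");
    pthread_mutex_unlock(&table_lock);
    return 0;
}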
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 01ced4a889e0..dc004b1e1f85 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1328,10 +1328,6 @@ static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
1328 [IFLA_INFO_SLAVE_DATA] = { .type = NLA_NESTED }, 1328 [IFLA_INFO_SLAVE_DATA] = { .type = NLA_NESTED },
1329}; 1329};
1330 1330
1331static const struct nla_policy ifla_vfinfo_policy[IFLA_VF_INFO_MAX+1] = {
1332 [IFLA_VF_INFO] = { .type = NLA_NESTED },
1333};
1334
1335static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = { 1331static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
1336 [IFLA_VF_MAC] = { .len = sizeof(struct ifla_vf_mac) }, 1332 [IFLA_VF_MAC] = { .len = sizeof(struct ifla_vf_mac) },
1337 [IFLA_VF_VLAN] = { .len = sizeof(struct ifla_vf_vlan) }, 1333 [IFLA_VF_VLAN] = { .len = sizeof(struct ifla_vf_vlan) },
@@ -1488,96 +1484,98 @@ static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[])
1488 return 0; 1484 return 0;
1489} 1485}
1490 1486
1491static int do_setvfinfo(struct net_device *dev, struct nlattr *attr) 1487static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
1492{ 1488{
1493 int rem, err = -EINVAL;
1494 struct nlattr *vf;
1495 const struct net_device_ops *ops = dev->netdev_ops; 1489 const struct net_device_ops *ops = dev->netdev_ops;
1490 int err = -EINVAL;
1496 1491
1497 nla_for_each_nested(vf, attr, rem) { 1492 if (tb[IFLA_VF_MAC]) {
1498 switch (nla_type(vf)) { 1493 struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]);
1499 case IFLA_VF_MAC: {
1500 struct ifla_vf_mac *ivm;
1501 ivm = nla_data(vf);
1502 err = -EOPNOTSUPP;
1503 if (ops->ndo_set_vf_mac)
1504 err = ops->ndo_set_vf_mac(dev, ivm->vf,
1505 ivm->mac);
1506 break;
1507 }
1508 case IFLA_VF_VLAN: {
1509 struct ifla_vf_vlan *ivv;
1510 ivv = nla_data(vf);
1511 err = -EOPNOTSUPP;
1512 if (ops->ndo_set_vf_vlan)
1513 err = ops->ndo_set_vf_vlan(dev, ivv->vf,
1514 ivv->vlan,
1515 ivv->qos);
1516 break;
1517 }
1518 case IFLA_VF_TX_RATE: {
1519 struct ifla_vf_tx_rate *ivt;
1520 struct ifla_vf_info ivf;
1521 ivt = nla_data(vf);
1522 err = -EOPNOTSUPP;
1523 if (ops->ndo_get_vf_config)
1524 err = ops->ndo_get_vf_config(dev, ivt->vf,
1525 &ivf);
1526 if (err)
1527 break;
1528 err = -EOPNOTSUPP;
1529 if (ops->ndo_set_vf_rate)
1530 err = ops->ndo_set_vf_rate(dev, ivt->vf,
1531 ivf.min_tx_rate,
1532 ivt->rate);
1533 break;
1534 }
1535 case IFLA_VF_RATE: {
1536 struct ifla_vf_rate *ivt;
1537 ivt = nla_data(vf);
1538 err = -EOPNOTSUPP;
1539 if (ops->ndo_set_vf_rate)
1540 err = ops->ndo_set_vf_rate(dev, ivt->vf,
1541 ivt->min_tx_rate,
1542 ivt->max_tx_rate);
1543 break;
1544 }
1545 case IFLA_VF_SPOOFCHK: {
1546 struct ifla_vf_spoofchk *ivs;
1547 ivs = nla_data(vf);
1548 err = -EOPNOTSUPP;
1549 if (ops->ndo_set_vf_spoofchk)
1550 err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
1551 ivs->setting);
1552 break;
1553 }
1554 case IFLA_VF_LINK_STATE: {
1555 struct ifla_vf_link_state *ivl;
1556 ivl = nla_data(vf);
1557 err = -EOPNOTSUPP;
1558 if (ops->ndo_set_vf_link_state)
1559 err = ops->ndo_set_vf_link_state(dev, ivl->vf,
1560 ivl->link_state);
1561 break;
1562 }
1563 case IFLA_VF_RSS_QUERY_EN: {
1564 struct ifla_vf_rss_query_en *ivrssq_en;
1565 1494
1566 ivrssq_en = nla_data(vf); 1495 err = -EOPNOTSUPP;
1567 err = -EOPNOTSUPP; 1496 if (ops->ndo_set_vf_mac)
1568 if (ops->ndo_set_vf_rss_query_en) 1497 err = ops->ndo_set_vf_mac(dev, ivm->vf,
1569 err = ops->ndo_set_vf_rss_query_en(dev, 1498 ivm->mac);
1570 ivrssq_en->vf, 1499 if (err < 0)
1571 ivrssq_en->setting); 1500 return err;
1572 break; 1501 }
1573 } 1502
1574 default: 1503 if (tb[IFLA_VF_VLAN]) {
1575 err = -EINVAL; 1504 struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]);
1576 break; 1505
1577 } 1506 err = -EOPNOTSUPP;
1578 if (err) 1507 if (ops->ndo_set_vf_vlan)
1579 break; 1508 err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan,
1509 ivv->qos);
1510 if (err < 0)
1511 return err;
1512 }
1513
1514 if (tb[IFLA_VF_TX_RATE]) {
1515 struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]);
1516 struct ifla_vf_info ivf;
1517
1518 err = -EOPNOTSUPP;
1519 if (ops->ndo_get_vf_config)
1520 err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf);
1521 if (err < 0)
1522 return err;
1523
1524 err = -EOPNOTSUPP;
1525 if (ops->ndo_set_vf_rate)
1526 err = ops->ndo_set_vf_rate(dev, ivt->vf,
1527 ivf.min_tx_rate,
1528 ivt->rate);
1529 if (err < 0)
1530 return err;
1531 }
1532
1533 if (tb[IFLA_VF_RATE]) {
1534 struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]);
1535
1536 err = -EOPNOTSUPP;
1537 if (ops->ndo_set_vf_rate)
1538 err = ops->ndo_set_vf_rate(dev, ivt->vf,
1539 ivt->min_tx_rate,
1540 ivt->max_tx_rate);
1541 if (err < 0)
1542 return err;
1543 }
1544
1545 if (tb[IFLA_VF_SPOOFCHK]) {
1546 struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]);
1547
1548 err = -EOPNOTSUPP;
1549 if (ops->ndo_set_vf_spoofchk)
1550 err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
1551 ivs->setting);
1552 if (err < 0)
1553 return err;
1580 } 1554 }
1555
1556 if (tb[IFLA_VF_LINK_STATE]) {
1557 struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]);
1558
1559 err = -EOPNOTSUPP;
1560 if (ops->ndo_set_vf_link_state)
1561 err = ops->ndo_set_vf_link_state(dev, ivl->vf,
1562 ivl->link_state);
1563 if (err < 0)
1564 return err;
1565 }
1566
1567 if (tb[IFLA_VF_RSS_QUERY_EN]) {
1568 struct ifla_vf_rss_query_en *ivrssq_en;
1569
1570 err = -EOPNOTSUPP;
1571 ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]);
1572 if (ops->ndo_set_vf_rss_query_en)
1573 err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf,
1574 ivrssq_en->setting);
1575 if (err < 0)
1576 return err;
1577 }
1578
1581 return err; 1579 return err;
1582} 1580}
1583 1581
@@ -1773,14 +1771,21 @@ static int do_setlink(const struct sk_buff *skb,
1773 } 1771 }
1774 1772
1775 if (tb[IFLA_VFINFO_LIST]) { 1773 if (tb[IFLA_VFINFO_LIST]) {
1774 struct nlattr *vfinfo[IFLA_VF_MAX + 1];
1776 struct nlattr *attr; 1775 struct nlattr *attr;
1777 int rem; 1776 int rem;
1777
1778 nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) { 1778 nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) {
1779 if (nla_type(attr) != IFLA_VF_INFO) { 1779 if (nla_type(attr) != IFLA_VF_INFO ||
1780 nla_len(attr) < NLA_HDRLEN) {
1780 err = -EINVAL; 1781 err = -EINVAL;
1781 goto errout; 1782 goto errout;
1782 } 1783 }
1783 err = do_setvfinfo(dev, attr); 1784 err = nla_parse_nested(vfinfo, IFLA_VF_MAX, attr,
1785 ifla_vf_policy);
1786 if (err < 0)
1787 goto errout;
1788 err = do_setvfinfo(dev, vfinfo);
1784 if (err < 0) 1789 if (err < 0)
1785 goto errout; 1790 goto errout;
1786 status |= DO_SETLINK_NOTIFY; 1791 status |= DO_SETLINK_NOTIFY;
@@ -1799,10 +1804,13 @@ static int do_setlink(const struct sk_buff *skb,
1799 goto errout; 1804 goto errout;
1800 1805
1801 nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) { 1806 nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) {
1802 if (nla_type(attr) != IFLA_VF_PORT) 1807 if (nla_type(attr) != IFLA_VF_PORT ||
1803 continue; 1808 nla_len(attr) < NLA_HDRLEN) {
1804 err = nla_parse_nested(port, IFLA_PORT_MAX, 1809 err = -EINVAL;
1805 attr, ifla_port_policy); 1810 goto errout;
1811 }
1812 err = nla_parse_nested(port, IFLA_PORT_MAX, attr,
1813 ifla_port_policy);
1806 if (err < 0) 1814 if (err < 0)
1807 goto errout; 1815 goto errout;
1808 if (!port[IFLA_PORT_VF]) { 1816 if (!port[IFLA_PORT_VF]) {
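
The rtnetlink hunks replace the hand-rolled switch in do_setvfinfo() with nla_parse_nested() plus the ifla_vf_policy table, and reject IFLA_VF_INFO / IFLA_VF_PORT attributes whose type or length is malformed before touching the payload. A toy TLV validator in the same spirit; the TLV layout and policy table below are invented and are not the netlink wire format.

#include <stdint.h>
#include <stdio.h>

enum { ATTR_MAC = 1, ATTR_VLAN = 2, ATTR_MAX = 2 };

/* Minimum payload length each attribute type must carry. */
static const size_t policy[ATTR_MAX + 1] = { 0, 6, 4 };

struct tlv { uint8_t type; uint8_t len; const uint8_t *data; };

/* Validate one attribute against the policy before using it. */
static int parse_attr(const struct tlv *a, const uint8_t *tb[ATTR_MAX + 1])
{
    if (a->type == 0 || a->type > ATTR_MAX)
        return -1;                 /* unknown type */
    if (a->len < policy[a->type])
        return -1;                 /* truncated payload */
    tb[a->type] = a->data;
    return 0;
}

int main(void)
{
    const uint8_t mac[6] = { 0, 1, 2, 3, 4, 5 };
    const uint8_t short_vlan[2] = { 0, 1 };
    const struct tlv attrs[] = {
        { ATTR_MAC, sizeof(mac), mac },
        { ATTR_VLAN, sizeof(short_vlan), short_vlan },  /* too short */
    };
    const uint8_t *tb[ATTR_MAX + 1] = { 0 };

    for (size_t i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++)
        printf("attr %u: %s\n", attrs[i].type,
               parse_attr(&attrs[i], tb) ? "rejected" : "accepted");
    return 0;
}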
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index b6a19ca0f99e..7b84330e5d30 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -340,7 +340,7 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)
340 340
341 if (skb && frag_size) { 341 if (skb && frag_size) {
342 skb->head_frag = 1; 342 skb->head_frag = 1;
343 if (virt_to_head_page(data)->pfmemalloc) 343 if (page_is_pfmemalloc(virt_to_head_page(data)))
344 skb->pfmemalloc = 1; 344 skb->pfmemalloc = 1;
345 } 345 }
346 return skb; 346 return skb;
@@ -4022,8 +4022,8 @@ EXPORT_SYMBOL(skb_checksum_setup);
4022 * Otherwise returns the provided skb. Returns NULL in error cases 4022 * Otherwise returns the provided skb. Returns NULL in error cases
4023 * (e.g. transport_len exceeds skb length or out-of-memory). 4023 * (e.g. transport_len exceeds skb length or out-of-memory).
4024 * 4024 *
4025 * Caller needs to set the skb transport header and release the returned skb. 4025 * Caller needs to set the skb transport header and free any returned skb if it
4026 * Provided skb is consumed. 4026 * differs from the provided skb.
4027 */ 4027 */
4028static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb, 4028static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
4029 unsigned int transport_len) 4029 unsigned int transport_len)
@@ -4032,16 +4032,12 @@ static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
4032 unsigned int len = skb_transport_offset(skb) + transport_len; 4032 unsigned int len = skb_transport_offset(skb) + transport_len;
4033 int ret; 4033 int ret;
4034 4034
4035 if (skb->len < len) { 4035 if (skb->len < len)
4036 kfree_skb(skb);
4037 return NULL; 4036 return NULL;
4038 } else if (skb->len == len) { 4037 else if (skb->len == len)
4039 return skb; 4038 return skb;
4040 }
4041 4039
4042 skb_chk = skb_clone(skb, GFP_ATOMIC); 4040 skb_chk = skb_clone(skb, GFP_ATOMIC);
4043 kfree_skb(skb);
4044
4045 if (!skb_chk) 4041 if (!skb_chk)
4046 return NULL; 4042 return NULL;
4047 4043
@@ -4066,8 +4062,8 @@ static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
4066 * If the skb has data beyond the given transport length, then a 4062 * If the skb has data beyond the given transport length, then a
4067 * trimmed & cloned skb is checked and returned. 4063 * trimmed & cloned skb is checked and returned.
4068 * 4064 *
4069 * Caller needs to set the skb transport header and release the returned skb. 4065 * Caller needs to set the skb transport header and free any returned skb if it
4070 * Provided skb is consumed. 4066 * differs from the provided skb.
4071 */ 4067 */
4072struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb, 4068struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
4073 unsigned int transport_len, 4069 unsigned int transport_len,
@@ -4079,23 +4075,26 @@ struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
4079 4075
4080 skb_chk = skb_checksum_maybe_trim(skb, transport_len); 4076 skb_chk = skb_checksum_maybe_trim(skb, transport_len);
4081 if (!skb_chk) 4077 if (!skb_chk)
4082 return NULL; 4078 goto err;
4083 4079
4084 if (!pskb_may_pull(skb_chk, offset)) { 4080 if (!pskb_may_pull(skb_chk, offset))
4085 kfree_skb(skb_chk); 4081 goto err;
4086 return NULL;
4087 }
4088 4082
4089 __skb_pull(skb_chk, offset); 4083 __skb_pull(skb_chk, offset);
4090 ret = skb_chkf(skb_chk); 4084 ret = skb_chkf(skb_chk);
4091 __skb_push(skb_chk, offset); 4085 __skb_push(skb_chk, offset);
4092 4086
4093 if (ret) { 4087 if (ret)
4094 kfree_skb(skb_chk); 4088 goto err;
4095 return NULL;
4096 }
4097 4089
4098 return skb_chk; 4090 return skb_chk;
4091
4092err:
4093 if (skb_chk && skb_chk != skb)
4094 kfree_skb(skb_chk);
4095
4096 return NULL;
4097
4099} 4098}
4100EXPORT_SYMBOL(skb_checksum_trimmed); 4099EXPORT_SYMBOL(skb_checksum_trimmed);
4101 4100
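
The skbuff.c change flips the ownership rule of skb_checksum_trimmed(): the provided skb is no longer consumed, the caller frees the returned buffer only when it differs from the one passed in, and the shared error path frees the clone exactly once. A small sketch of that "maybe returns the input, maybe a fresh copy" contract; the names and the string trimming are invented stand-ins.

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Returns the input when it is already fine, otherwise a freshly
 * allocated, trimmed copy, or NULL on error.  The input is never
 * freed here - that is the caller's job, mirroring the new contract. */
static char *maybe_trim(char *s)
{
    size_t n = strlen(s);

    if (n == 0 || !isspace((unsigned char)s[n - 1]))
        return s;                  /* unchanged: hand the input back */

    char *copy = malloc(n);        /* n-1 chars + NUL terminator */
    if (copy) {
        memcpy(copy, s, n - 1);
        copy[n - 1] = '\0';
    }
    return copy;
}

int main(void)
{
    char *orig = strdup("payload ");
    char *trimmed = maybe_trim(orig);

    if (trimmed && trimmed != orig)
        printf("got a trimmed copy: '%s'\n", trimmed);

    /* Free the result only if it differs from what we passed in. */
    if (trimmed && trimmed != orig)
        free(trimmed);
    free(orig);
    return 0;
}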
diff --git a/net/core/sock.c b/net/core/sock.c
index 08f16db46070..193901d09757 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1497,7 +1497,8 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
1497 sock_copy(newsk, sk); 1497 sock_copy(newsk, sk);
1498 1498
1499 /* SANITY */ 1499 /* SANITY */
1500 get_net(sock_net(newsk)); 1500 if (likely(newsk->sk_net_refcnt))
1501 get_net(sock_net(newsk));
1501 sk_node_init(&newsk->sk_node); 1502 sk_node_init(&newsk->sk_node);
1502 sock_lock_init(newsk); 1503 sock_lock_init(newsk);
1503 bh_lock_sock(newsk); 1504 bh_lock_sock(newsk);
@@ -1967,20 +1968,21 @@ static void __release_sock(struct sock *sk)
1967 * sk_wait_data - wait for data to arrive at sk_receive_queue 1968 * sk_wait_data - wait for data to arrive at sk_receive_queue
1968 * @sk: sock to wait on 1969 * @sk: sock to wait on
1969 * @timeo: for how long 1970 * @timeo: for how long
1971 * @skb: last skb seen on sk_receive_queue
1970 * 1972 *
1971 * Now socket state including sk->sk_err is changed only under lock, 1973 * Now socket state including sk->sk_err is changed only under lock,
1972 * hence we may omit checks after joining wait queue. 1974 * hence we may omit checks after joining wait queue.
1973 * We check receive queue before schedule() only as optimization; 1975 * We check receive queue before schedule() only as optimization;
1974 * it is very likely that release_sock() added new data. 1976 * it is very likely that release_sock() added new data.
1975 */ 1977 */
1976int sk_wait_data(struct sock *sk, long *timeo) 1978int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb)
1977{ 1979{
1978 int rc; 1980 int rc;
1979 DEFINE_WAIT(wait); 1981 DEFINE_WAIT(wait);
1980 1982
1981 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 1983 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1982 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 1984 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1983 rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue)); 1985 rc = sk_wait_event(sk, timeo, skb_peek_tail(&sk->sk_receive_queue) != skb);
1984 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 1986 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1985 finish_wait(sk_sleep(sk), &wait); 1987 finish_wait(sk_sleep(sk), &wait);
1986 return rc; 1988 return rc;
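
sk_wait_data() now takes the last skb the caller saw and waits until the tail of the receive queue is no longer that skb, so peeked-but-still-queued data does not satisfy the wait. A condition-variable sketch of "wait until the tail changes from what I last saw"; pthreads stand in for the socket wait queue and every identifier is invented.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int queue_tail_id;          /* id of the last queued message */

/* Wait until the queue tail differs from the one we last saw. */
static void wait_for_new_data(int last_seen)
{
    pthread_mutex_lock(&lock);
    while (queue_tail_id == last_seen)
        pthread_cond_wait(&cond, &lock);
    printf("reader: new tail %d (was %d)\n", queue_tail_id, last_seen);
    pthread_mutex_unlock(&lock);
}

static void *producer(void *arg)
{
    (void)arg;
    sleep(1);
    pthread_mutex_lock(&lock);
    queue_tail_id = 2;             /* genuinely new data arrives */
    pthread_cond_signal(&cond);
    pthread_mutex_unlock(&lock);
    return NULL;
}

int main(void)
{
    pthread_t t;

    queue_tail_id = 1;             /* a message we already peeked */
    pthread_create(&t, NULL, producer, NULL);
    wait_for_new_data(1);          /* does not wake for the peeked tail */
    pthread_join(t, NULL);
    return 0;
}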
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 52a94016526d..b5cf13a28009 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -886,7 +886,7 @@ verify_sock_status:
886 break; 886 break;
887 } 887 }
888 888
889 sk_wait_data(sk, &timeo); 889 sk_wait_data(sk, &timeo, NULL);
890 continue; 890 continue;
891 found_ok_skb: 891 found_ok_skb:
892 if (len > skb->len) 892 if (len > skb->len)
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 392e29a0227d..b445d492c115 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -630,7 +630,7 @@ static int dsa_of_probe(struct device *dev)
630 continue; 630 continue;
631 631
632 cd->sw_addr = be32_to_cpup(sw_addr); 632 cd->sw_addr = be32_to_cpup(sw_addr);
633 if (cd->sw_addr > PHY_MAX_ADDR) 633 if (cd->sw_addr >= PHY_MAX_ADDR)
634 continue; 634 continue;
635 635
636 if (!of_property_read_u32(child, "eeprom-length", &eeprom_len)) 636 if (!of_property_read_u32(child, "eeprom-length", &eeprom_len))
@@ -642,6 +642,8 @@ static int dsa_of_probe(struct device *dev)
642 continue; 642 continue;
643 643
644 port_index = be32_to_cpup(port_reg); 644 port_index = be32_to_cpup(port_reg);
645 if (port_index >= DSA_MAX_PORTS)
646 break;
645 647
646 port_name = of_get_property(port, "label", NULL); 648 port_name = of_get_property(port, "label", NULL);
647 if (!port_name) 649 if (!port_name)
@@ -666,8 +668,6 @@ static int dsa_of_probe(struct device *dev)
666 goto out_free_chip; 668 goto out_free_chip;
667 } 669 }
668 670
669 if (port_index == DSA_MAX_PORTS)
670 break;
671 } 671 }
672 } 672 }
673 673
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 0917123790ea..35c47ddd04f0 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -756,7 +756,8 @@ static int dsa_slave_phy_connect(struct dsa_slave_priv *p,
756 return -ENODEV; 756 return -ENODEV;
757 757
758 /* Use already configured phy mode */ 758 /* Use already configured phy mode */
759 p->phy_interface = p->phy->interface; 759 if (p->phy_interface == PHY_INTERFACE_MODE_NA)
760 p->phy_interface = p->phy->interface;
760 phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link, 761 phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link,
761 p->phy_interface); 762 p->phy_interface);
762 763
diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c
index f46e4d1306f2..214d44aef35b 100644
--- a/net/ieee802154/6lowpan/reassembly.c
+++ b/net/ieee802154/6lowpan/reassembly.c
@@ -207,7 +207,7 @@ found:
207 } else { 207 } else {
208 fq->q.meat += skb->len; 208 fq->q.meat += skb->len;
209 } 209 }
210 add_frag_mem_limit(&fq->q, skb->truesize); 210 add_frag_mem_limit(fq->q.net, skb->truesize);
211 211
212 if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) && 212 if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
213 fq->q.meat == fq->q.len) { 213 fq->q.meat == fq->q.len) {
@@ -287,7 +287,7 @@ static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev,
287 clone->data_len = clone->len; 287 clone->data_len = clone->len;
288 head->data_len -= clone->len; 288 head->data_len -= clone->len;
289 head->len -= clone->len; 289 head->len -= clone->len;
290 add_frag_mem_limit(&fq->q, clone->truesize); 290 add_frag_mem_limit(fq->q.net, clone->truesize);
291 } 291 }
292 292
293 WARN_ON(head == NULL); 293 WARN_ON(head == NULL);
@@ -310,7 +310,7 @@ static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev,
310 } 310 }
311 fp = next; 311 fp = next;
312 } 312 }
313 sub_frag_mem_limit(&fq->q, sum_truesize); 313 sub_frag_mem_limit(fq->q.net, sum_truesize);
314 314
315 head->next = NULL; 315 head->next = NULL;
316 head->dev = dev; 316 head->dev = dev;
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 933a92820d26..6c8b1fbafce8 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -1017,14 +1017,16 @@ static int arp_req_get(struct arpreq *r, struct net_device *dev)
1017 1017
1018 neigh = neigh_lookup(&arp_tbl, &ip, dev); 1018 neigh = neigh_lookup(&arp_tbl, &ip, dev);
1019 if (neigh) { 1019 if (neigh) {
1020 read_lock_bh(&neigh->lock); 1020 if (!(neigh->nud_state & NUD_NOARP)) {
1021 memcpy(r->arp_ha.sa_data, neigh->ha, dev->addr_len); 1021 read_lock_bh(&neigh->lock);
1022 r->arp_flags = arp_state_to_flags(neigh); 1022 memcpy(r->arp_ha.sa_data, neigh->ha, dev->addr_len);
1023 read_unlock_bh(&neigh->lock); 1023 r->arp_flags = arp_state_to_flags(neigh);
1024 r->arp_ha.sa_family = dev->type; 1024 read_unlock_bh(&neigh->lock);
1025 strlcpy(r->arp_dev, dev->name, sizeof(r->arp_dev)); 1025 r->arp_ha.sa_family = dev->type;
1026 strlcpy(r->arp_dev, dev->name, sizeof(r->arp_dev));
1027 err = 0;
1028 }
1026 neigh_release(neigh); 1029 neigh_release(neigh);
1027 err = 0;
1028 } 1030 }
1029 return err; 1031 return err;
1030} 1032}
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
index 90c0e8386116..574fad9cca05 100644
--- a/net/ipv4/datagram.c
+++ b/net/ipv4/datagram.c
@@ -20,7 +20,7 @@
20#include <net/route.h> 20#include <net/route.h>
21#include <net/tcp_states.h> 21#include <net/tcp_states.h>
22 22
23int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) 23int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
24{ 24{
25 struct inet_sock *inet = inet_sk(sk); 25 struct inet_sock *inet = inet_sk(sk);
26 struct sockaddr_in *usin = (struct sockaddr_in *) uaddr; 26 struct sockaddr_in *usin = (struct sockaddr_in *) uaddr;
@@ -39,8 +39,6 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
39 39
40 sk_dst_reset(sk); 40 sk_dst_reset(sk);
41 41
42 lock_sock(sk);
43
44 oif = sk->sk_bound_dev_if; 42 oif = sk->sk_bound_dev_if;
45 saddr = inet->inet_saddr; 43 saddr = inet->inet_saddr;
46 if (ipv4_is_multicast(usin->sin_addr.s_addr)) { 44 if (ipv4_is_multicast(usin->sin_addr.s_addr)) {
@@ -82,9 +80,19 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
82 sk_dst_set(sk, &rt->dst); 80 sk_dst_set(sk, &rt->dst);
83 err = 0; 81 err = 0;
84out: 82out:
85 release_sock(sk);
86 return err; 83 return err;
87} 84}
85EXPORT_SYMBOL(__ip4_datagram_connect);
86
87int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
88{
89 int res;
90
91 lock_sock(sk);
92 res = __ip4_datagram_connect(sk, uaddr, addr_len);
93 release_sock(sk);
94 return res;
95}
88EXPORT_SYMBOL(ip4_datagram_connect); 96EXPORT_SYMBOL(ip4_datagram_connect);
89 97
90/* Because UDP xmit path can manipulate sk_dst_cache without holding 98/* Because UDP xmit path can manipulate sk_dst_cache without holding
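
ip4_datagram_connect() is split into an unlocked __ip4_datagram_connect() helper plus a thin wrapper that takes and releases the socket lock, so callers that already hold the lock can use the helper directly. A minimal sketch of that locked-wrapper pattern; the names and shared state below are invented.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t sock_lock = PTHREAD_MUTEX_INITIALIZER;
static int connected_to;

/* Core logic; caller must hold sock_lock (the "__" variant). */
static int __do_connect(int addr)
{
    connected_to = addr;
    return 0;
}

/* Public entry point: wraps the helper in lock/unlock. */
static int do_connect(int addr)
{
    int res;

    pthread_mutex_lock(&sock_lock);
    res = __do_connect(addr);
    pthread_mutex_unlock(&sock_lock);
    return res;
}

int main(void)
{
    do_connect(42);
    printf("connected_to = %d\n", connected_to);
    return 0;
}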
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 7498716e8f54..2d9cb1748f81 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -882,7 +882,6 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh)
882 queue_delayed_work(system_power_efficient_wq, 882 queue_delayed_work(system_power_efficient_wq,
883 &check_lifetime_work, 0); 883 &check_lifetime_work, 0);
884 rtmsg_ifa(RTM_NEWADDR, ifa, nlh, NETLINK_CB(skb).portid); 884 rtmsg_ifa(RTM_NEWADDR, ifa, nlh, NETLINK_CB(skb).portid);
885 blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa);
886 } 885 }
887 return 0; 886 return 0;
888} 887}
@@ -1740,6 +1739,8 @@ static int inet_netconf_msgsize_devconf(int type)
1740 size += nla_total_size(4); 1739 size += nla_total_size(4);
1741 if (type == -1 || type == NETCONFA_PROXY_NEIGH) 1740 if (type == -1 || type == NETCONFA_PROXY_NEIGH)
1742 size += nla_total_size(4); 1741 size += nla_total_size(4);
1742 if (type == -1 || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN)
1743 size += nla_total_size(4);
1743 1744
1744 return size; 1745 return size;
1745} 1746}
@@ -1780,6 +1781,10 @@ static int inet_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
1780 nla_put_s32(skb, NETCONFA_PROXY_NEIGH, 1781 nla_put_s32(skb, NETCONFA_PROXY_NEIGH,
1781 IPV4_DEVCONF(*devconf, PROXY_ARP)) < 0) 1782 IPV4_DEVCONF(*devconf, PROXY_ARP)) < 0)
1782 goto nla_put_failure; 1783 goto nla_put_failure;
1784 if ((type == -1 || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN) &&
1785 nla_put_s32(skb, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
1786 IPV4_DEVCONF(*devconf, IGNORE_ROUTES_WITH_LINKDOWN)) < 0)
1787 goto nla_put_failure;
1783 1788
1784 nlmsg_end(skb, nlh); 1789 nlmsg_end(skb, nlh);
1785 return 0; 1790 return 0;
@@ -1819,6 +1824,7 @@ static const struct nla_policy devconf_ipv4_policy[NETCONFA_MAX+1] = {
1819 [NETCONFA_FORWARDING] = { .len = sizeof(int) }, 1824 [NETCONFA_FORWARDING] = { .len = sizeof(int) },
1820 [NETCONFA_RP_FILTER] = { .len = sizeof(int) }, 1825 [NETCONFA_RP_FILTER] = { .len = sizeof(int) },
1821 [NETCONFA_PROXY_NEIGH] = { .len = sizeof(int) }, 1826 [NETCONFA_PROXY_NEIGH] = { .len = sizeof(int) },
1827 [NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN] = { .len = sizeof(int) },
1822}; 1828};
1823 1829
1824static int inet_netconf_get_devconf(struct sk_buff *in_skb, 1830static int inet_netconf_get_devconf(struct sk_buff *in_skb,
@@ -2048,6 +2054,12 @@ static int devinet_conf_proc(struct ctl_table *ctl, int write,
2048 inet_netconf_notify_devconf(net, NETCONFA_PROXY_NEIGH, 2054 inet_netconf_notify_devconf(net, NETCONFA_PROXY_NEIGH,
2049 ifindex, cnf); 2055 ifindex, cnf);
2050 } 2056 }
2057 if (i == IPV4_DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN - 1 &&
2058 new_value != old_value) {
2059 ifindex = devinet_conf_ifindex(net, cnf);
2060 inet_netconf_notify_devconf(net, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
2061 ifindex, cnf);
2062 }
2051 } 2063 }
2052 2064
2053 return ret; 2065 return ret;
diff --git a/net/ipv4/fib_lookup.h b/net/ipv4/fib_lookup.h
index c6211ed60b03..9c02920725db 100644
--- a/net/ipv4/fib_lookup.h
+++ b/net/ipv4/fib_lookup.h
@@ -13,6 +13,7 @@ struct fib_alias {
13 u8 fa_state; 13 u8 fa_state;
14 u8 fa_slen; 14 u8 fa_slen;
15 u32 tb_id; 15 u32 tb_id;
16 s16 fa_default;
16 struct rcu_head rcu; 17 struct rcu_head rcu;
17}; 18};
18 19
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index c7358ea4ae93..3a06586b170c 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -1202,23 +1202,40 @@ int fib_sync_down_dev(struct net_device *dev, unsigned long event)
1202} 1202}
1203 1203
1204/* Must be invoked inside of an RCU protected region. */ 1204/* Must be invoked inside of an RCU protected region. */
1205void fib_select_default(struct fib_result *res) 1205void fib_select_default(const struct flowi4 *flp, struct fib_result *res)
1206{ 1206{
1207 struct fib_info *fi = NULL, *last_resort = NULL; 1207 struct fib_info *fi = NULL, *last_resort = NULL;
1208 struct hlist_head *fa_head = res->fa_head; 1208 struct hlist_head *fa_head = res->fa_head;
1209 struct fib_table *tb = res->table; 1209 struct fib_table *tb = res->table;
1210 u8 slen = 32 - res->prefixlen;
1210 int order = -1, last_idx = -1; 1211 int order = -1, last_idx = -1;
1211 struct fib_alias *fa; 1212 struct fib_alias *fa, *fa1 = NULL;
1213 u32 last_prio = res->fi->fib_priority;
1214 u8 last_tos = 0;
1212 1215
1213 hlist_for_each_entry_rcu(fa, fa_head, fa_list) { 1216 hlist_for_each_entry_rcu(fa, fa_head, fa_list) {
1214 struct fib_info *next_fi = fa->fa_info; 1217 struct fib_info *next_fi = fa->fa_info;
1215 1218
1219 if (fa->fa_slen != slen)
1220 continue;
1221 if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos)
1222 continue;
1223 if (fa->tb_id != tb->tb_id)
1224 continue;
1225 if (next_fi->fib_priority > last_prio &&
1226 fa->fa_tos == last_tos) {
1227 if (last_tos)
1228 continue;
1229 break;
1230 }
1231 if (next_fi->fib_flags & RTNH_F_DEAD)
1232 continue;
1233 last_tos = fa->fa_tos;
1234 last_prio = next_fi->fib_priority;
1235
1216 if (next_fi->fib_scope != res->scope || 1236 if (next_fi->fib_scope != res->scope ||
1217 fa->fa_type != RTN_UNICAST) 1237 fa->fa_type != RTN_UNICAST)
1218 continue; 1238 continue;
1219
1220 if (next_fi->fib_priority > res->fi->fib_priority)
1221 break;
1222 if (!next_fi->fib_nh[0].nh_gw || 1239 if (!next_fi->fib_nh[0].nh_gw ||
1223 next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK) 1240 next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
1224 continue; 1241 continue;
@@ -1228,10 +1245,11 @@ void fib_select_default(struct fib_result *res)
1228 if (!fi) { 1245 if (!fi) {
1229 if (next_fi != res->fi) 1246 if (next_fi != res->fi)
1230 break; 1247 break;
1248 fa1 = fa;
1231 } else if (!fib_detect_death(fi, order, &last_resort, 1249 } else if (!fib_detect_death(fi, order, &last_resort,
1232 &last_idx, tb->tb_default)) { 1250 &last_idx, fa1->fa_default)) {
1233 fib_result_assign(res, fi); 1251 fib_result_assign(res, fi);
1234 tb->tb_default = order; 1252 fa1->fa_default = order;
1235 goto out; 1253 goto out;
1236 } 1254 }
1237 fi = next_fi; 1255 fi = next_fi;
@@ -1239,20 +1257,21 @@ void fib_select_default(struct fib_result *res)
1239 } 1257 }
1240 1258
1241 if (order <= 0 || !fi) { 1259 if (order <= 0 || !fi) {
1242 tb->tb_default = -1; 1260 if (fa1)
1261 fa1->fa_default = -1;
1243 goto out; 1262 goto out;
1244 } 1263 }
1245 1264
1246 if (!fib_detect_death(fi, order, &last_resort, &last_idx, 1265 if (!fib_detect_death(fi, order, &last_resort, &last_idx,
1247 tb->tb_default)) { 1266 fa1->fa_default)) {
1248 fib_result_assign(res, fi); 1267 fib_result_assign(res, fi);
1249 tb->tb_default = order; 1268 fa1->fa_default = order;
1250 goto out; 1269 goto out;
1251 } 1270 }
1252 1271
1253 if (last_idx >= 0) 1272 if (last_idx >= 0)
1254 fib_result_assign(res, last_resort); 1273 fib_result_assign(res, last_resort);
1255 tb->tb_default = last_idx; 1274 fa1->fa_default = last_idx;
1256out: 1275out:
1257 return; 1276 return;
1258} 1277}
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 15d32612e3c6..b0c6258ffb79 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1171,6 +1171,7 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
1171 new_fa->fa_state = state & ~FA_S_ACCESSED; 1171 new_fa->fa_state = state & ~FA_S_ACCESSED;
1172 new_fa->fa_slen = fa->fa_slen; 1172 new_fa->fa_slen = fa->fa_slen;
1173 new_fa->tb_id = tb->tb_id; 1173 new_fa->tb_id = tb->tb_id;
1174 new_fa->fa_default = -1;
1174 1175
1175 err = switchdev_fib_ipv4_add(key, plen, fi, 1176 err = switchdev_fib_ipv4_add(key, plen, fi,
1176 new_fa->fa_tos, 1177 new_fa->fa_tos,
@@ -1222,6 +1223,7 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
1222 new_fa->fa_state = 0; 1223 new_fa->fa_state = 0;
1223 new_fa->fa_slen = slen; 1224 new_fa->fa_slen = slen;
1224 new_fa->tb_id = tb->tb_id; 1225 new_fa->tb_id = tb->tb_id;
1226 new_fa->fa_default = -1;
1225 1227
1226 /* (Optionally) offload fib entry to switch hardware. */ 1228 /* (Optionally) offload fib entry to switch hardware. */
1227 err = switchdev_fib_ipv4_add(key, plen, fi, tos, cfg->fc_type, 1229 err = switchdev_fib_ipv4_add(key, plen, fi, tos, cfg->fc_type,
@@ -1791,8 +1793,6 @@ void fib_table_flush_external(struct fib_table *tb)
1791 if (hlist_empty(&n->leaf)) { 1793 if (hlist_empty(&n->leaf)) {
1792 put_child_root(pn, n->key, NULL); 1794 put_child_root(pn, n->key, NULL);
1793 node_free(n); 1795 node_free(n);
1794 } else {
1795 leaf_pull_suffix(pn, n);
1796 } 1796 }
1797 } 1797 }
1798} 1798}
@@ -1862,8 +1862,6 @@ int fib_table_flush(struct fib_table *tb)
1862 if (hlist_empty(&n->leaf)) { 1862 if (hlist_empty(&n->leaf)) {
1863 put_child_root(pn, n->key, NULL); 1863 put_child_root(pn, n->key, NULL);
1864 node_free(n); 1864 node_free(n);
1865 } else {
1866 leaf_pull_suffix(pn, n);
1867 } 1865 }
1868 } 1866 }
1869 1867
@@ -1990,7 +1988,6 @@ struct fib_table *fib_trie_table(u32 id, struct fib_table *alias)
1990 return NULL; 1988 return NULL;
1991 1989
1992 tb->tb_id = id; 1990 tb->tb_id = id;
1993 tb->tb_default = -1;
1994 tb->tb_num_default = 0; 1991 tb->tb_num_default = 0;
1995 tb->tb_data = (alias ? alias->__data : tb->__data); 1992 tb->tb_data = (alias ? alias->__data : tb->__data);
1996 1993
@@ -2468,7 +2465,7 @@ static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter,
2468 key = l->key + 1; 2465 key = l->key + 1;
2469 iter->pos++; 2466 iter->pos++;
2470 2467
2471 if (pos-- <= 0) 2468 if (--pos <= 0)
2472 break; 2469 break;
2473 2470
2474 l = NULL; 2471 l = NULL;
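
The fib_trie fix swaps pos-- <= 0 for --pos <= 0 so the iterator stops one leaf earlier: the post-decrement form compares the old value and walks one entry too far. A short demonstration of the off-by-one with ordinary ints, nothing kernel-specific.

#include <stdio.h>

static int count_iterations(int pos, int pre)
{
    int iters = 0;

    for (;;) {
        iters++;
        if (pre ? (--pos <= 0) : (pos-- <= 0))
            break;
    }
    return iters;
}

int main(void)
{
    /* Asking for the 3rd entry: post-decrement visits one extra leaf. */
    printf("post-decrement: %d iterations\n", count_iterations(3, 0));
    printf("pre-decrement:  %d iterations\n", count_iterations(3, 1));
    return 0;
}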
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 651cdf648ec4..9fdfd9deac11 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1435,33 +1435,35 @@ static int __ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed)
1435 struct sk_buff *skb_chk; 1435 struct sk_buff *skb_chk;
1436 unsigned int transport_len; 1436 unsigned int transport_len;
1437 unsigned int len = skb_transport_offset(skb) + sizeof(struct igmphdr); 1437 unsigned int len = skb_transport_offset(skb) + sizeof(struct igmphdr);
1438 int ret; 1438 int ret = -EINVAL;
1439 1439
1440 transport_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb); 1440 transport_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb);
1441 1441
1442 skb_get(skb);
1443 skb_chk = skb_checksum_trimmed(skb, transport_len, 1442 skb_chk = skb_checksum_trimmed(skb, transport_len,
1444 ip_mc_validate_checksum); 1443 ip_mc_validate_checksum);
1445 if (!skb_chk) 1444 if (!skb_chk)
1446 return -EINVAL; 1445 goto err;
1447 1446
1448 if (!pskb_may_pull(skb_chk, len)) { 1447 if (!pskb_may_pull(skb_chk, len))
1449 kfree_skb(skb_chk); 1448 goto err;
1450 return -EINVAL;
1451 }
1452 1449
1453 ret = ip_mc_check_igmp_msg(skb_chk); 1450 ret = ip_mc_check_igmp_msg(skb_chk);
1454 if (ret) { 1451 if (ret)
1455 kfree_skb(skb_chk); 1452 goto err;
1456 return ret;
1457 }
1458 1453
1459 if (skb_trimmed) 1454 if (skb_trimmed)
1460 *skb_trimmed = skb_chk; 1455 *skb_trimmed = skb_chk;
1461 else 1456 /* free now unneeded clone */
1457 else if (skb_chk != skb)
1462 kfree_skb(skb_chk); 1458 kfree_skb(skb_chk);
1463 1459
1464 return 0; 1460 ret = 0;
1461
1462err:
1463 if (ret && skb_chk && skb_chk != skb)
1464 kfree_skb(skb_chk);
1465
1466 return ret;
1465} 1467}
1466 1468
1467/** 1469/**
@@ -1470,7 +1472,7 @@ static int __ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed)
1470 * @skb_trimmed: to store an skb pointer trimmed to IPv4 packet tail (optional) 1472 * @skb_trimmed: to store an skb pointer trimmed to IPv4 packet tail (optional)
1471 * 1473 *
1472 * Checks whether an IPv4 packet is a valid IGMP packet. If so sets 1474 * Checks whether an IPv4 packet is a valid IGMP packet. If so sets
1473 * skb network and transport headers accordingly and returns zero. 1475 * skb transport header accordingly and returns zero.
1474 * 1476 *
1475 * -EINVAL: A broken packet was detected, i.e. it violates some internet 1477 * -EINVAL: A broken packet was detected, i.e. it violates some internet
1476 * standard 1478 * standard
@@ -1485,7 +1487,8 @@ static int __ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed)
1485 * to leave the original skb and its full frame unchanged (which might be 1487 * to leave the original skb and its full frame unchanged (which might be
1486 * desirable for layer 2 frame jugglers). 1488 * desirable for layer 2 frame jugglers).
1487 * 1489 *
1488 * The caller needs to release a reference count from any returned skb_trimmed. 1490 * Caller needs to set the skb network header and free any returned skb if it
1491 * differs from the provided skb.
1489 */ 1492 */
1490int ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed) 1493int ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed)
1491{ 1494{
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 60021d0d9326..134957159c27 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -593,7 +593,7 @@ static bool reqsk_queue_unlink(struct request_sock_queue *queue,
593 } 593 }
594 594
595 spin_unlock(&queue->syn_wait_lock); 595 spin_unlock(&queue->syn_wait_lock);
596 if (del_timer(&req->rsk_timer)) 596 if (timer_pending(&req->rsk_timer) && del_timer_sync(&req->rsk_timer))
597 reqsk_put(req); 597 reqsk_put(req);
598 return found; 598 return found;
599} 599}
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 9bc26677058e..c3b1f3a0f4cf 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -152,8 +152,8 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
152 inet6_sk(sk)->tclass) < 0) 152 inet6_sk(sk)->tclass) < 0)
153 goto errout; 153 goto errout;
154 154
155 if (ipv6_only_sock(sk) && 155 if (((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) &&
156 nla_put_u8(skb, INET_DIAG_SKV6ONLY, 1)) 156 nla_put_u8(skb, INET_DIAG_SKV6ONLY, ipv6_only_sock(sk)))
157 goto errout; 157 goto errout;
158 } 158 }
159#endif 159#endif
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 5e346a082e5f..d0a7c0319e3d 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -131,34 +131,22 @@ inet_evict_bucket(struct inet_frags *f, struct inet_frag_bucket *hb)
131 unsigned int evicted = 0; 131 unsigned int evicted = 0;
132 HLIST_HEAD(expired); 132 HLIST_HEAD(expired);
133 133
134evict_again:
135 spin_lock(&hb->chain_lock); 134 spin_lock(&hb->chain_lock);
136 135
137 hlist_for_each_entry_safe(fq, n, &hb->chain, list) { 136 hlist_for_each_entry_safe(fq, n, &hb->chain, list) {
138 if (!inet_fragq_should_evict(fq)) 137 if (!inet_fragq_should_evict(fq))
139 continue; 138 continue;
140 139
141 if (!del_timer(&fq->timer)) { 140 if (!del_timer(&fq->timer))
142 /* q expiring right now thus increment its refcount so 141 continue;
143 * it won't be freed under us and wait until the timer
144 * has finished executing then destroy it
145 */
146 atomic_inc(&fq->refcnt);
147 spin_unlock(&hb->chain_lock);
148 del_timer_sync(&fq->timer);
149 inet_frag_put(fq, f);
150 goto evict_again;
151 }
152 142
153 fq->flags |= INET_FRAG_EVICTED; 143 hlist_add_head(&fq->list_evictor, &expired);
154 hlist_del(&fq->list);
155 hlist_add_head(&fq->list, &expired);
156 ++evicted; 144 ++evicted;
157 } 145 }
158 146
159 spin_unlock(&hb->chain_lock); 147 spin_unlock(&hb->chain_lock);
160 148
161 hlist_for_each_entry_safe(fq, n, &expired, list) 149 hlist_for_each_entry_safe(fq, n, &expired, list_evictor)
162 f->frag_expire((unsigned long) fq); 150 f->frag_expire((unsigned long) fq);
163 151
164 return evicted; 152 return evicted;
@@ -240,18 +228,20 @@ void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
240 int i; 228 int i;
241 229
242 nf->low_thresh = 0; 230 nf->low_thresh = 0;
243 local_bh_disable();
244 231
245evict_again: 232evict_again:
233 local_bh_disable();
246 seq = read_seqbegin(&f->rnd_seqlock); 234 seq = read_seqbegin(&f->rnd_seqlock);
247 235
248 for (i = 0; i < INETFRAGS_HASHSZ ; i++) 236 for (i = 0; i < INETFRAGS_HASHSZ ; i++)
249 inet_evict_bucket(f, &f->hash[i]); 237 inet_evict_bucket(f, &f->hash[i]);
250 238
251 if (read_seqretry(&f->rnd_seqlock, seq))
252 goto evict_again;
253
254 local_bh_enable(); 239 local_bh_enable();
240 cond_resched();
241
242 if (read_seqretry(&f->rnd_seqlock, seq) ||
243 percpu_counter_sum(&nf->mem))
244 goto evict_again;
255 245
256 percpu_counter_destroy(&nf->mem); 246 percpu_counter_destroy(&nf->mem);
257} 247}
@@ -284,8 +274,8 @@ static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
284 struct inet_frag_bucket *hb; 274 struct inet_frag_bucket *hb;
285 275
286 hb = get_frag_bucket_locked(fq, f); 276 hb = get_frag_bucket_locked(fq, f);
287 if (!(fq->flags & INET_FRAG_EVICTED)) 277 hlist_del(&fq->list);
288 hlist_del(&fq->list); 278 fq->flags |= INET_FRAG_COMPLETE;
289 spin_unlock(&hb->chain_lock); 279 spin_unlock(&hb->chain_lock);
290} 280}
291 281
@@ -297,7 +287,6 @@ void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
297 if (!(fq->flags & INET_FRAG_COMPLETE)) { 287 if (!(fq->flags & INET_FRAG_COMPLETE)) {
298 fq_unlink(fq, f); 288 fq_unlink(fq, f);
299 atomic_dec(&fq->refcnt); 289 atomic_dec(&fq->refcnt);
300 fq->flags |= INET_FRAG_COMPLETE;
301 } 290 }
302} 291}
303EXPORT_SYMBOL(inet_frag_kill); 292EXPORT_SYMBOL(inet_frag_kill);
@@ -330,11 +319,12 @@ void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f)
330 fp = xp; 319 fp = xp;
331 } 320 }
332 sum = sum_truesize + f->qsize; 321 sum = sum_truesize + f->qsize;
333 sub_frag_mem_limit(q, sum);
334 322
335 if (f->destructor) 323 if (f->destructor)
336 f->destructor(q); 324 f->destructor(q);
337 kmem_cache_free(f->frags_cachep, q); 325 kmem_cache_free(f->frags_cachep, q);
326
327 sub_frag_mem_limit(nf, sum);
338} 328}
339EXPORT_SYMBOL(inet_frag_destroy); 329EXPORT_SYMBOL(inet_frag_destroy);
340 330
@@ -390,7 +380,7 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
390 380
391 q->net = nf; 381 q->net = nf;
392 f->constructor(q, arg); 382 f->constructor(q, arg);
393 add_frag_mem_limit(q, f->qsize); 383 add_frag_mem_limit(nf, f->qsize);
394 384
395 setup_timer(&q->timer, f->frag_expire, (unsigned long)q); 385 setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
396 spin_lock_init(&q->lock); 386 spin_lock_init(&q->lock);
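
The inet_evict_bucket() rework above drops the evict_again retry dance: queues whose timer already fired are simply skipped, and expired queues are collected onto a private list through the dedicated list_evictor link so the expiry work can run after the bucket lock is released. A rough user-space sketch of that collect-under-lock, process-after-unlock shape (a pthread mutex stands in for the chain spinlock, and the timer handling is omitted):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct frag_queue {
	int id;
	int expired;			/* set when the queue timed out */
	struct frag_queue *chain_next;	/* link in the hash bucket */
	struct frag_queue *evict_next;	/* link in the local eviction list */
};

static pthread_mutex_t chain_lock = PTHREAD_MUTEX_INITIALIZER;

/* Collect expired queues under the lock, run the (potentially slow) expiry
 * work only after the lock is dropped.  A second link field means building
 * the eviction list never has to unlink entries from the bucket here. */
static unsigned int evict_bucket(struct frag_queue **bucket)
{
	struct frag_queue *fq, *evicted_list = NULL;
	unsigned int evicted = 0;

	pthread_mutex_lock(&chain_lock);
	for (fq = *bucket; fq; fq = fq->chain_next) {
		if (!fq->expired)
			continue;
		fq->evict_next = evicted_list;
		evicted_list = fq;
		evicted++;
	}
	pthread_mutex_unlock(&chain_lock);

	for (fq = evicted_list; fq; fq = fq->evict_next)
		printf("expiring queue %d outside the lock\n", fq->id);

	return evicted;
}

int main(void)
{
	struct frag_queue a = { .id = 1, .expired = 1 };
	struct frag_queue b = { .id = 2, .expired = 0, .chain_next = &a };
	struct frag_queue *bucket = &b;

	printf("evicted %u\n", evict_bucket(&bucket));
	return 0;
}
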
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 5f9b063bbe8a..0cb9165421d4 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -624,22 +624,21 @@ EXPORT_SYMBOL_GPL(inet_hashinfo_init);
624 624
625int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo) 625int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
626{ 626{
627 unsigned int locksz = sizeof(spinlock_t);
627 unsigned int i, nblocks = 1; 628 unsigned int i, nblocks = 1;
628 629
629 if (sizeof(spinlock_t) != 0) { 630 if (locksz != 0) {
630 /* allocate 2 cache lines or at least one spinlock per cpu */ 631 /* allocate 2 cache lines or at least one spinlock per cpu */
631 nblocks = max_t(unsigned int, 632 nblocks = max(2U * L1_CACHE_BYTES / locksz, 1U);
632 2 * L1_CACHE_BYTES / sizeof(spinlock_t),
633 1);
634 nblocks = roundup_pow_of_two(nblocks * num_possible_cpus()); 633 nblocks = roundup_pow_of_two(nblocks * num_possible_cpus());
635 634
636 /* no more locks than number of hash buckets */ 635 /* no more locks than number of hash buckets */
637 nblocks = min(nblocks, hashinfo->ehash_mask + 1); 636 nblocks = min(nblocks, hashinfo->ehash_mask + 1);
638 637
639 hashinfo->ehash_locks = kmalloc_array(nblocks, sizeof(spinlock_t), 638 hashinfo->ehash_locks = kmalloc_array(nblocks, locksz,
640 GFP_KERNEL | __GFP_NOWARN); 639 GFP_KERNEL | __GFP_NOWARN);
641 if (!hashinfo->ehash_locks) 640 if (!hashinfo->ehash_locks)
642 hashinfo->ehash_locks = vmalloc(nblocks * sizeof(spinlock_t)); 641 hashinfo->ehash_locks = vmalloc(nblocks * locksz);
643 642
644 if (!hashinfo->ehash_locks) 643 if (!hashinfo->ehash_locks)
645 return -ENOMEM; 644 return -ENOMEM;
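
The inet_ehash_locks_alloc() hunk above hoists sizeof(spinlock_t) into locksz and keeps the same sizing policy: two cache lines worth of locks or at least one, scaled by the possible CPUs, rounded up to a power of two and capped at the number of hash buckets. A small stand-alone C program re-doing that arithmetic; the constants below (cache line size, CPU count, lock size, bucket count) are assumptions for the demo, not the kernel's values:

#include <stdio.h>

#define L1_CACHE_BYTES	64	/* illustrative, comes from the config */
#define NR_CPUS		8
#define EHASH_BUCKETS	65536

static unsigned int roundup_pow_of_two(unsigned int n)
{
	unsigned int r = 1;

	while (r < n)
		r <<= 1;
	return r;
}

int main(void)
{
	unsigned int locksz = 4;	/* pretend sizeof(spinlock_t) == 4 */
	unsigned int nblocks = 1;

	if (locksz != 0) {
		/* two cache lines of locks, or at least one per cpu */
		nblocks = 2U * L1_CACHE_BYTES / locksz;
		if (nblocks < 1)
			nblocks = 1;
		nblocks = roundup_pow_of_two(nblocks * NR_CPUS);

		/* never more locks than hash buckets */
		if (nblocks > EHASH_BUCKETS)
			nblocks = EHASH_BUCKETS;
	}
	printf("ehash lock blocks: %u\n", nblocks);	/* 256 here */
	return 0;
}
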
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index a50dc6d408d1..921138f6c97c 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -202,7 +202,7 @@ static void ip_expire(unsigned long arg)
202 ipq_kill(qp); 202 ipq_kill(qp);
203 IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS); 203 IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
204 204
205 if (!(qp->q.flags & INET_FRAG_EVICTED)) { 205 if (!inet_frag_evicting(&qp->q)) {
206 struct sk_buff *head = qp->q.fragments; 206 struct sk_buff *head = qp->q.fragments;
207 const struct iphdr *iph; 207 const struct iphdr *iph;
208 int err; 208 int err;
@@ -309,7 +309,7 @@ static int ip_frag_reinit(struct ipq *qp)
309 kfree_skb(fp); 309 kfree_skb(fp);
310 fp = xp; 310 fp = xp;
311 } while (fp); 311 } while (fp);
312 sub_frag_mem_limit(&qp->q, sum_truesize); 312 sub_frag_mem_limit(qp->q.net, sum_truesize);
313 313
314 qp->q.flags = 0; 314 qp->q.flags = 0;
315 qp->q.len = 0; 315 qp->q.len = 0;
@@ -351,7 +351,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
351 ihl = ip_hdrlen(skb); 351 ihl = ip_hdrlen(skb);
352 352
353 /* Determine the position of this fragment. */ 353 /* Determine the position of this fragment. */
354 end = offset + skb->len - ihl; 354 end = offset + skb->len - skb_network_offset(skb) - ihl;
355 err = -EINVAL; 355 err = -EINVAL;
356 356
357 /* Is this the final fragment? */ 357 /* Is this the final fragment? */
@@ -381,7 +381,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
381 goto err; 381 goto err;
382 382
383 err = -ENOMEM; 383 err = -ENOMEM;
384 if (!pskb_pull(skb, ihl)) 384 if (!pskb_pull(skb, skb_network_offset(skb) + ihl))
385 goto err; 385 goto err;
386 386
387 err = pskb_trim_rcsum(skb, end - offset); 387 err = pskb_trim_rcsum(skb, end - offset);
@@ -455,7 +455,7 @@ found:
455 qp->q.fragments = next; 455 qp->q.fragments = next;
456 456
457 qp->q.meat -= free_it->len; 457 qp->q.meat -= free_it->len;
458 sub_frag_mem_limit(&qp->q, free_it->truesize); 458 sub_frag_mem_limit(qp->q.net, free_it->truesize);
459 kfree_skb(free_it); 459 kfree_skb(free_it);
460 } 460 }
461 } 461 }
@@ -479,7 +479,7 @@ found:
479 qp->q.stamp = skb->tstamp; 479 qp->q.stamp = skb->tstamp;
480 qp->q.meat += skb->len; 480 qp->q.meat += skb->len;
481 qp->ecn |= ecn; 481 qp->ecn |= ecn;
482 add_frag_mem_limit(&qp->q, skb->truesize); 482 add_frag_mem_limit(qp->q.net, skb->truesize);
483 if (offset == 0) 483 if (offset == 0)
484 qp->q.flags |= INET_FRAG_FIRST_IN; 484 qp->q.flags |= INET_FRAG_FIRST_IN;
485 485
@@ -587,7 +587,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
587 head->len -= clone->len; 587 head->len -= clone->len;
588 clone->csum = 0; 588 clone->csum = 0;
589 clone->ip_summed = head->ip_summed; 589 clone->ip_summed = head->ip_summed;
590 add_frag_mem_limit(&qp->q, clone->truesize); 590 add_frag_mem_limit(qp->q.net, clone->truesize);
591 } 591 }
592 592
593 skb_push(head, head->data - skb_network_header(head)); 593 skb_push(head, head->data - skb_network_header(head));
@@ -615,7 +615,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
615 } 615 }
616 fp = next; 616 fp = next;
617 } 617 }
618 sub_frag_mem_limit(&qp->q, sum_truesize); 618 sub_frag_mem_limit(qp->q.net, sum_truesize);
619 619
620 head->next = NULL; 620 head->next = NULL;
621 head->dev = dev; 621 head->dev = dev;
@@ -641,6 +641,8 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
641 iph->frag_off = 0; 641 iph->frag_off = 0;
642 } 642 }
643 643
644 ip_send_check(iph);
645
644 IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS); 646 IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS);
645 qp->q.fragments = NULL; 647 qp->q.fragments = NULL;
646 qp->q.fragments_tail = NULL; 648 qp->q.fragments_tail = NULL;
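
ip_frag_reasm() above now calls ip_send_check() after rewriting tot_len and frag_off, so the reassembled datagram leaves with a header checksum that matches the edited fields. For reference, a self-contained sketch of the RFC 1071 style one's-complement sum such a recomputation performs; the sample header bytes below are invented for the demo:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* 16-bit one's complement sum over the IPv4 header, recomputed after the
 * header fields have been rewritten.  memcpy keeps the loads alignment-safe. */
static uint16_t ip_header_csum(const uint8_t *hdr, unsigned int ihl_words)
{
	uint32_t sum = 0;
	uint16_t word;
	unsigned int i;

	for (i = 0; i < ihl_words * 4; i += 2) {
		memcpy(&word, hdr + i, sizeof(word));
		sum += word;
	}
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	uint8_t iph[20] = {
		0x45, 0x00, 0x00, 0x54,	/* version/ihl, tos, tot_len */
		0x00, 0x00, 0x40, 0x00,	/* id, flags/frag_off (DF) */
		0x40, 0x01, 0x00, 0x00,	/* ttl, protocol, checksum = 0 */
		0xc0, 0xa8, 0x00, 0x01,	/* saddr 192.168.0.1 */
		0xc0, 0xa8, 0x00, 0x02,	/* daddr 192.168.0.2 */
	};
	uint16_t csum = ip_header_csum(iph, 5);

	memcpy(&iph[10], &csum, sizeof(csum));
	printf("recomputed checksum: 0x%04x\n", (unsigned)csum);
	return 0;
}
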
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 4c2c3ba4ba65..626d9e56a6bd 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -586,7 +586,8 @@ int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t,
586EXPORT_SYMBOL(ip_tunnel_encap); 586EXPORT_SYMBOL(ip_tunnel_encap);
587 587
588static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb, 588static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
589 struct rtable *rt, __be16 df) 589 struct rtable *rt, __be16 df,
590 const struct iphdr *inner_iph)
590{ 591{
591 struct ip_tunnel *tunnel = netdev_priv(dev); 592 struct ip_tunnel *tunnel = netdev_priv(dev);
592 int pkt_size = skb->len - tunnel->hlen - dev->hard_header_len; 593 int pkt_size = skb->len - tunnel->hlen - dev->hard_header_len;
@@ -603,7 +604,8 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
603 604
604 if (skb->protocol == htons(ETH_P_IP)) { 605 if (skb->protocol == htons(ETH_P_IP)) {
605 if (!skb_is_gso(skb) && 606 if (!skb_is_gso(skb) &&
606 (df & htons(IP_DF)) && mtu < pkt_size) { 607 (inner_iph->frag_off & htons(IP_DF)) &&
608 mtu < pkt_size) {
607 memset(IPCB(skb), 0, sizeof(*IPCB(skb))); 609 memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
608 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu)); 610 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
609 return -E2BIG; 611 return -E2BIG;
@@ -737,7 +739,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
737 goto tx_error; 739 goto tx_error;
738 } 740 }
739 741
740 if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off)) { 742 if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off, inner_iph)) {
741 ip_rt_put(rt); 743 ip_rt_put(rt);
742 goto tx_error; 744 goto tx_error;
743 } 745 }
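
The tnl_update_pmtu() change above bases the ICMP_FRAG_NEEDED decision on the inner IPv4 header's DF bit rather than the tunnel's own frag_off template. A tiny user-space sketch of that predicate, with the usual IP_DF flag value assumed and all the route/MTU plumbing left out:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

#define IP_DF 0x4000	/* "don't fragment" flag, host order */

/* Simplified version of the check the hunk above modifies: whether to
 * reject an over-MTU packet depends on the *inner* header's DF bit. */
static int needs_frag_needed(uint16_t inner_frag_off_be, int gso,
			     unsigned int pkt_size, unsigned int mtu)
{
	return !gso &&
	       (inner_frag_off_be & htons(IP_DF)) &&
	       mtu < pkt_size;
}

int main(void)
{
	uint16_t df_set = htons(IP_DF);
	uint16_t df_clear = 0;

	printf("DF set,   1400 > 1280: %d\n",
	       needs_frag_needed(df_set, 0, 1400, 1280));	/* 1: send ICMP */
	printf("DF clear, 1400 > 1280: %d\n",
	       needs_frag_needed(df_clear, 0, 1400, 1280));	/* 0: fragment later */
	return 0;
}
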
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 95c9b6eece25..92305a1a021a 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -254,9 +254,10 @@ unsigned int arpt_do_table(struct sk_buff *skb,
254 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long)))); 254 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
255 unsigned int verdict = NF_DROP; 255 unsigned int verdict = NF_DROP;
256 const struct arphdr *arp; 256 const struct arphdr *arp;
257 struct arpt_entry *e, *back; 257 struct arpt_entry *e, **jumpstack;
258 const char *indev, *outdev; 258 const char *indev, *outdev;
259 const void *table_base; 259 const void *table_base;
260 unsigned int cpu, stackidx = 0;
260 const struct xt_table_info *private; 261 const struct xt_table_info *private;
261 struct xt_action_param acpar; 262 struct xt_action_param acpar;
262 unsigned int addend; 263 unsigned int addend;
@@ -270,15 +271,16 @@ unsigned int arpt_do_table(struct sk_buff *skb,
270 local_bh_disable(); 271 local_bh_disable();
271 addend = xt_write_recseq_begin(); 272 addend = xt_write_recseq_begin();
272 private = table->private; 273 private = table->private;
274 cpu = smp_processor_id();
273 /* 275 /*
274 * Ensure we load private-> members after we've fetched the base 276 * Ensure we load private-> members after we've fetched the base
275 * pointer. 277 * pointer.
276 */ 278 */
277 smp_read_barrier_depends(); 279 smp_read_barrier_depends();
278 table_base = private->entries; 280 table_base = private->entries;
281 jumpstack = (struct arpt_entry **)private->jumpstack[cpu];
279 282
280 e = get_entry(table_base, private->hook_entry[hook]); 283 e = get_entry(table_base, private->hook_entry[hook]);
281 back = get_entry(table_base, private->underflow[hook]);
282 284
283 acpar.in = state->in; 285 acpar.in = state->in;
284 acpar.out = state->out; 286 acpar.out = state->out;
@@ -312,18 +314,23 @@ unsigned int arpt_do_table(struct sk_buff *skb,
312 verdict = (unsigned int)(-v) - 1; 314 verdict = (unsigned int)(-v) - 1;
313 break; 315 break;
314 } 316 }
315 e = back; 317 if (stackidx == 0) {
316 back = get_entry(table_base, back->comefrom); 318 e = get_entry(table_base,
319 private->underflow[hook]);
320 } else {
321 e = jumpstack[--stackidx];
322 e = arpt_next_entry(e);
323 }
317 continue; 324 continue;
318 } 325 }
319 if (table_base + v 326 if (table_base + v
320 != arpt_next_entry(e)) { 327 != arpt_next_entry(e)) {
321 /* Save old back ptr in next entry */
322 struct arpt_entry *next = arpt_next_entry(e);
323 next->comefrom = (void *)back - table_base;
324 328
325 /* set back pointer to next entry */ 329 if (stackidx >= private->stacksize) {
326 back = next; 330 verdict = NF_DROP;
331 break;
332 }
333 jumpstack[stackidx++] = e;
327 } 334 }
328 335
329 e = get_entry(table_base, v); 336 e = get_entry(table_base, v);
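
arpt_do_table() above switches from threading a back pointer through the entries to a per-cpu jump stack: a jump pushes the current entry, RETURN pops it (or falls back to the hook's underflow when the stack is empty), and overflow drops the packet. A much simplified, single-threaded sketch of that control flow over a flat rule array; the rule layout and verdict values here are invented for the example:

#include <stdio.h>

#define STACKSIZE	16
#define VERDICT_NONE	-1
#define VERDICT_RETURN	-2
#define VERDICT_DROP	0
#define VERDICT_ACCEPT	1

struct rule {
	int match;	/* 1 if the rule matches the packet */
	int verdict;	/* VERDICT_* or VERDICT_NONE to fall through */
	int jump_to;	/* index of a user chain, or -1 */
};

/* Walk the ruleset with an explicit jump stack instead of threading a
 * back pointer through the entries.  Grossly simplified: one flat array. */
static int do_table(const struct rule *rules, int nrules, int entry)
{
	int jumpstack[STACKSIZE];
	int stackidx = 0;
	int i = entry;

	while (i < nrules) {
		const struct rule *r = &rules[i];

		if (!r->match) {
			i++;
			continue;
		}
		if (r->verdict == VERDICT_RETURN) {
			if (stackidx == 0)
				return VERDICT_DROP;	/* underflow policy */
			i = jumpstack[--stackidx] + 1;	/* resume after the jump */
			continue;
		}
		if (r->verdict != VERDICT_NONE)
			return r->verdict;
		if (r->jump_to >= 0) {
			if (stackidx >= STACKSIZE)
				return VERDICT_DROP;	/* stack overflow */
			jumpstack[stackidx++] = i;
			i = r->jump_to;
			continue;
		}
		i++;
	}
	return VERDICT_DROP;
}

int main(void)
{
	struct rule rules[] = {
		{ .match = 1, .verdict = VERDICT_NONE, .jump_to = 3 },	  /* 0: jump */
		{ .match = 1, .verdict = VERDICT_ACCEPT, .jump_to = -1 }, /* 1 */
		{ .match = 0 },						  /* 2 */
		{ .match = 0, .jump_to = -1 },				  /* 3: chain */
		{ .match = 1, .verdict = VERDICT_RETURN, .jump_to = -1 }, /* 4 */
	};

	printf("verdict: %d\n", do_table(rules, 5, 0));	/* 1 == ACCEPT */
	return 0;
}
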
diff --git a/net/ipv4/netfilter/ipt_SYNPROXY.c b/net/ipv4/netfilter/ipt_SYNPROXY.c
index fe8cc183411e..95ea633e8356 100644
--- a/net/ipv4/netfilter/ipt_SYNPROXY.c
+++ b/net/ipv4/netfilter/ipt_SYNPROXY.c
@@ -226,7 +226,8 @@ synproxy_send_client_ack(const struct synproxy_net *snet,
226 226
227 synproxy_build_options(nth, opts); 227 synproxy_build_options(nth, opts);
228 228
229 synproxy_send_tcp(skb, nskb, NULL, 0, niph, nth, tcp_hdr_size); 229 synproxy_send_tcp(skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
230 niph, nth, tcp_hdr_size);
230} 231}
231 232
232static bool 233static bool
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index d0362a2de3d3..e681b852ced1 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2176,7 +2176,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
2176 if (!res.prefixlen && 2176 if (!res.prefixlen &&
2177 res.table->tb_num_default > 1 && 2177 res.table->tb_num_default > 1 &&
2178 res.type == RTN_UNICAST && !fl4->flowi4_oif) 2178 res.type == RTN_UNICAST && !fl4->flowi4_oif)
2179 fib_select_default(&res); 2179 fib_select_default(fl4, &res);
2180 2180
2181 if (!fl4->saddr) 2181 if (!fl4->saddr)
2182 fl4->saddr = FIB_RES_PREFSRC(net, res); 2182 fl4->saddr = FIB_RES_PREFSRC(net, res);
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 433231ccfb17..0330ab2e2b63 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -41,8 +41,6 @@ static int tcp_syn_retries_min = 1;
41static int tcp_syn_retries_max = MAX_TCP_SYNCNT; 41static int tcp_syn_retries_max = MAX_TCP_SYNCNT;
42static int ip_ping_group_range_min[] = { 0, 0 }; 42static int ip_ping_group_range_min[] = { 0, 0 };
43static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX }; 43static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
44static int min_sndbuf = SOCK_MIN_SNDBUF;
45static int min_rcvbuf = SOCK_MIN_RCVBUF;
46 44
47/* Update system visible IP port range */ 45/* Update system visible IP port range */
48static void set_local_port_range(struct net *net, int range[2]) 46static void set_local_port_range(struct net *net, int range[2])
@@ -530,7 +528,7 @@ static struct ctl_table ipv4_table[] = {
530 .maxlen = sizeof(sysctl_tcp_wmem), 528 .maxlen = sizeof(sysctl_tcp_wmem),
531 .mode = 0644, 529 .mode = 0644,
532 .proc_handler = proc_dointvec_minmax, 530 .proc_handler = proc_dointvec_minmax,
533 .extra1 = &min_sndbuf, 531 .extra1 = &one,
534 }, 532 },
535 { 533 {
536 .procname = "tcp_notsent_lowat", 534 .procname = "tcp_notsent_lowat",
@@ -545,7 +543,7 @@ static struct ctl_table ipv4_table[] = {
545 .maxlen = sizeof(sysctl_tcp_rmem), 543 .maxlen = sizeof(sysctl_tcp_rmem),
546 .mode = 0644, 544 .mode = 0644,
547 .proc_handler = proc_dointvec_minmax, 545 .proc_handler = proc_dointvec_minmax,
548 .extra1 = &min_rcvbuf, 546 .extra1 = &one,
549 }, 547 },
550 { 548 {
551 .procname = "tcp_app_win", 549 .procname = "tcp_app_win",
@@ -758,7 +756,7 @@ static struct ctl_table ipv4_table[] = {
758 .maxlen = sizeof(sysctl_udp_rmem_min), 756 .maxlen = sizeof(sysctl_udp_rmem_min),
759 .mode = 0644, 757 .mode = 0644,
760 .proc_handler = proc_dointvec_minmax, 758 .proc_handler = proc_dointvec_minmax,
761 .extra1 = &min_rcvbuf, 759 .extra1 = &one
762 }, 760 },
763 { 761 {
764 .procname = "udp_wmem_min", 762 .procname = "udp_wmem_min",
@@ -766,7 +764,7 @@ static struct ctl_table ipv4_table[] = {
766 .maxlen = sizeof(sysctl_udp_wmem_min), 764 .maxlen = sizeof(sysctl_udp_wmem_min),
767 .mode = 0644, 765 .mode = 0644,
768 .proc_handler = proc_dointvec_minmax, 766 .proc_handler = proc_dointvec_minmax,
769 .extra1 = &min_sndbuf, 767 .extra1 = &one
770 }, 768 },
771 { } 769 { }
772}; 770};
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 7f4056785acc..45534a5ab430 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -780,7 +780,7 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
780 ret = -EAGAIN; 780 ret = -EAGAIN;
781 break; 781 break;
782 } 782 }
783 sk_wait_data(sk, &timeo); 783 sk_wait_data(sk, &timeo, NULL);
784 if (signal_pending(current)) { 784 if (signal_pending(current)) {
785 ret = sock_intr_errno(timeo); 785 ret = sock_intr_errno(timeo);
786 break; 786 break;
@@ -1575,7 +1575,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
1575 int target; /* Read at least this many bytes */ 1575 int target; /* Read at least this many bytes */
1576 long timeo; 1576 long timeo;
1577 struct task_struct *user_recv = NULL; 1577 struct task_struct *user_recv = NULL;
1578 struct sk_buff *skb; 1578 struct sk_buff *skb, *last;
1579 u32 urg_hole = 0; 1579 u32 urg_hole = 0;
1580 1580
1581 if (unlikely(flags & MSG_ERRQUEUE)) 1581 if (unlikely(flags & MSG_ERRQUEUE))
@@ -1635,7 +1635,9 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
1635 1635
1636 /* Next get a buffer. */ 1636 /* Next get a buffer. */
1637 1637
1638 last = skb_peek_tail(&sk->sk_receive_queue);
1638 skb_queue_walk(&sk->sk_receive_queue, skb) { 1639 skb_queue_walk(&sk->sk_receive_queue, skb) {
1640 last = skb;
1639 /* Now that we have two receive queues this 1641 /* Now that we have two receive queues this
1640 * shouldn't happen. 1642 * shouldn't happen.
1641 */ 1643 */
@@ -1754,8 +1756,9 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
1754 /* Do not sleep, just process backlog. */ 1756 /* Do not sleep, just process backlog. */
1755 release_sock(sk); 1757 release_sock(sk);
1756 lock_sock(sk); 1758 lock_sock(sk);
1757 } else 1759 } else {
1758 sk_wait_data(sk, &timeo); 1760 sk_wait_data(sk, &timeo, last);
1761 }
1759 1762
1760 if (user_recv) { 1763 if (user_recv) {
1761 int chunk; 1764 int chunk;
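
tcp_recvmsg() above keeps track of the last skb it saw while walking the receive queue and hands it to sk_wait_data(), which can then sleep until the queue tail is no longer that skb. A toy sketch of the idea with a hand-rolled singly linked list; the wait itself is reduced to a "did the tail change" check:

#include <stddef.h>
#include <stdio.h>

struct skb {
	int seq;
	struct skb *next;
};

/* Remember the last buffer seen while scanning the queue; the waiter can
 * then use "tail changed" as its wake-up condition. */
static struct skb *scan_queue(struct skb *head, struct skb **last)
{
	struct skb *skb;

	*last = head;
	for (skb = head; skb; skb = skb->next) {
		*last = skb;
		if (skb->seq == 42)
			return skb;	/* found what the reader wanted */
	}
	return NULL;
}

static int queue_changed(const struct skb *tail_then, const struct skb *head)
{
	const struct skb *tail_now = head;

	while (tail_now && tail_now->next)
		tail_now = tail_now->next;
	return tail_now != tail_then;	/* something was appended meanwhile */
}

int main(void)
{
	struct skb b = { 2, NULL }, a = { 1, &b };
	struct skb *last = NULL;

	printf("found: %p\n", (void *)scan_queue(&a, &last));
	printf("changed before append: %d\n", queue_changed(last, &a));

	struct skb c = { 3, NULL };
	b.next = &c;
	printf("changed after append:  %d\n", queue_changed(last, &a));
	return 0;
}
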
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 684f095d196e..728f5b3d3c64 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1917,14 +1917,13 @@ void tcp_enter_loss(struct sock *sk)
1917 const struct inet_connection_sock *icsk = inet_csk(sk); 1917 const struct inet_connection_sock *icsk = inet_csk(sk);
1918 struct tcp_sock *tp = tcp_sk(sk); 1918 struct tcp_sock *tp = tcp_sk(sk);
1919 struct sk_buff *skb; 1919 struct sk_buff *skb;
1920 bool new_recovery = false; 1920 bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery;
1921 bool is_reneg; /* is receiver reneging on SACKs? */ 1921 bool is_reneg; /* is receiver reneging on SACKs? */
1922 1922
1923 /* Reduce ssthresh if it has not yet been made inside this window. */ 1923 /* Reduce ssthresh if it has not yet been made inside this window. */
1924 if (icsk->icsk_ca_state <= TCP_CA_Disorder || 1924 if (icsk->icsk_ca_state <= TCP_CA_Disorder ||
1925 !after(tp->high_seq, tp->snd_una) || 1925 !after(tp->high_seq, tp->snd_una) ||
1926 (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) { 1926 (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) {
1927 new_recovery = true;
1928 tp->prior_ssthresh = tcp_current_ssthresh(sk); 1927 tp->prior_ssthresh = tcp_current_ssthresh(sk);
1929 tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); 1928 tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
1930 tcp_ca_event(sk, CA_EVENT_LOSS); 1929 tcp_ca_event(sk, CA_EVENT_LOSS);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index d7d4c2b79cf2..0ea2e1c5d395 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1348,7 +1348,7 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1348 req = inet_csk_search_req(sk, th->source, iph->saddr, iph->daddr); 1348 req = inet_csk_search_req(sk, th->source, iph->saddr, iph->daddr);
1349 if (req) { 1349 if (req) {
1350 nsk = tcp_check_req(sk, skb, req, false); 1350 nsk = tcp_check_req(sk, skb, req, false);
1351 if (!nsk) 1351 if (!nsk || nsk == sk)
1352 reqsk_put(req); 1352 reqsk_put(req);
1353 return nsk; 1353 return nsk;
1354 } 1354 }
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 83aa604f9273..1b8c5ba7d5f7 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1995,12 +1995,19 @@ void udp_v4_early_demux(struct sk_buff *skb)
1995 1995
1996 skb->sk = sk; 1996 skb->sk = sk;
1997 skb->destructor = sock_efree; 1997 skb->destructor = sock_efree;
1998 dst = sk->sk_rx_dst; 1998 dst = READ_ONCE(sk->sk_rx_dst);
1999 1999
2000 if (dst) 2000 if (dst)
2001 dst = dst_check(dst, 0); 2001 dst = dst_check(dst, 0);
2002 if (dst) 2002 if (dst) {
2003 skb_dst_set_noref(skb, dst); 2003 /* DST_NOCACHE can not be used without taking a reference */
2004 if (dst->flags & DST_NOCACHE) {
2005 if (likely(atomic_inc_not_zero(&dst->__refcnt)))
2006 skb_dst_set(skb, dst);
2007 } else {
2008 skb_dst_set_noref(skb, dst);
2009 }
2010 }
2004} 2011}
2005 2012
2006int udp_rcv(struct sk_buff *skb) 2013int udp_rcv(struct sk_buff *skb)
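
udp_v4_early_demux() above now reads sk_rx_dst once and, for a DST_NOCACHE entry, only caches it in the skb after atomic_inc_not_zero() confirms the dst is still live. A user-space sketch of that inc-if-not-zero reference pattern using C11 atomics; the kernel's atomic_t is not this type, but the race being closed is the same:

#include <stdatomic.h>
#include <stdio.h>

/* Take a reference only if the object is still live (refcount > 0). */
static int get_ref_not_zero(atomic_int *refcnt)
{
	int old = atomic_load(refcnt);

	while (old != 0) {
		if (atomic_compare_exchange_weak(refcnt, &old, old + 1))
			return 1;	/* reference taken */
	}
	return 0;			/* object already dying, do not touch */
}

int main(void)
{
	atomic_int live = 2, dying = 0;

	printf("live:  %d (refcnt now %d)\n", get_ref_not_zero(&live),
	       atomic_load(&live));
	printf("dying: %d (refcnt now %d)\n", get_ref_not_zero(&dying),
	       atomic_load(&dying));
	return 0;
}
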
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 62d908e64eeb..b10a88986a98 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -40,7 +40,7 @@ static bool ipv6_mapped_addr_any(const struct in6_addr *a)
40 return ipv6_addr_v4mapped(a) && (a->s6_addr32[3] == 0); 40 return ipv6_addr_v4mapped(a) && (a->s6_addr32[3] == 0);
41} 41}
42 42
43int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) 43static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
44{ 44{
45 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr; 45 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
46 struct inet_sock *inet = inet_sk(sk); 46 struct inet_sock *inet = inet_sk(sk);
@@ -56,7 +56,7 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
56 if (usin->sin6_family == AF_INET) { 56 if (usin->sin6_family == AF_INET) {
57 if (__ipv6_only_sock(sk)) 57 if (__ipv6_only_sock(sk))
58 return -EAFNOSUPPORT; 58 return -EAFNOSUPPORT;
59 err = ip4_datagram_connect(sk, uaddr, addr_len); 59 err = __ip4_datagram_connect(sk, uaddr, addr_len);
60 goto ipv4_connected; 60 goto ipv4_connected;
61 } 61 }
62 62
@@ -98,9 +98,9 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
98 sin.sin_addr.s_addr = daddr->s6_addr32[3]; 98 sin.sin_addr.s_addr = daddr->s6_addr32[3];
99 sin.sin_port = usin->sin6_port; 99 sin.sin_port = usin->sin6_port;
100 100
101 err = ip4_datagram_connect(sk, 101 err = __ip4_datagram_connect(sk,
102 (struct sockaddr *) &sin, 102 (struct sockaddr *) &sin,
103 sizeof(sin)); 103 sizeof(sin));
104 104
105ipv4_connected: 105ipv4_connected:
106 if (err) 106 if (err)
@@ -204,6 +204,16 @@ out:
204 fl6_sock_release(flowlabel); 204 fl6_sock_release(flowlabel);
205 return err; 205 return err;
206} 206}
207
208int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
209{
210 int res;
211
212 lock_sock(sk);
213 res = __ip6_datagram_connect(sk, uaddr, addr_len);
214 release_sock(sk);
215 return res;
216}
207EXPORT_SYMBOL_GPL(ip6_datagram_connect); 217EXPORT_SYMBOL_GPL(ip6_datagram_connect);
208 218
209int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *uaddr, 219int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *uaddr,
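
The datagram.c hunk above splits ip6_datagram_connect() into an unlocked __ variant plus a thin wrapper that takes and releases the socket lock around it. A bare-bones sketch of that split with a pthread mutex standing in for lock_sock()/release_sock(); the function names and the address argument are placeholders:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t sock_lock = PTHREAD_MUTEX_INITIALIZER;

/* Unlocked worker: assumes the caller already holds the socket lock. */
static int __datagram_connect(int addr)
{
	printf("connecting to %d with the lock held\n", addr);
	return 0;
}

/* Public entry point: callers that already hold the lock can use the __
 * variant directly, everyone else gets locking for free. */
static int datagram_connect(int addr)
{
	int res;

	pthread_mutex_lock(&sock_lock);
	res = __datagram_connect(addr);
	pthread_mutex_unlock(&sock_lock);
	return res;
}

int main(void)
{
	return datagram_connect(42);
}
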
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 55d19861ab20..548c6237b1e7 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -172,6 +172,8 @@ static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt)
172 *ppcpu_rt = NULL; 172 *ppcpu_rt = NULL;
173 } 173 }
174 } 174 }
175
176 non_pcpu_rt->rt6i_pcpu = NULL;
175} 177}
176 178
177static void rt6_release(struct rt6_info *rt) 179static void rt6_release(struct rt6_info *rt)
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index f2e464eba5ef..57990c929cd8 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -331,10 +331,10 @@ int ip6_mc_input(struct sk_buff *skb)
331 if (offset < 0) 331 if (offset < 0)
332 goto out; 332 goto out;
333 333
334 if (!ipv6_is_mld(skb, nexthdr, offset)) 334 if (ipv6_is_mld(skb, nexthdr, offset))
335 goto out; 335 deliver = true;
336 336
337 deliver = true; 337 goto out;
338 } 338 }
339 /* unknown RA - process it normally */ 339 /* unknown RA - process it normally */
340 } 340 }
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index e893cd18612f..08b62047c67f 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -292,8 +292,6 @@ static struct packet_offload ipv6_packet_offload __read_mostly = {
292static const struct net_offload sit_offload = { 292static const struct net_offload sit_offload = {
293 .callbacks = { 293 .callbacks = {
294 .gso_segment = ipv6_gso_segment, 294 .gso_segment = ipv6_gso_segment,
295 .gro_receive = ipv6_gro_receive,
296 .gro_complete = ipv6_gro_complete,
297 }, 295 },
298}; 296};
299 297
diff --git a/net/ipv6/mcast_snoop.c b/net/ipv6/mcast_snoop.c
index df8afe5ab31e..9405b04eecc6 100644
--- a/net/ipv6/mcast_snoop.c
+++ b/net/ipv6/mcast_snoop.c
@@ -143,34 +143,36 @@ static int __ipv6_mc_check_mld(struct sk_buff *skb,
143 struct sk_buff *skb_chk = NULL; 143 struct sk_buff *skb_chk = NULL;
144 unsigned int transport_len; 144 unsigned int transport_len;
145 unsigned int len = skb_transport_offset(skb) + sizeof(struct mld_msg); 145 unsigned int len = skb_transport_offset(skb) + sizeof(struct mld_msg);
146 int ret; 146 int ret = -EINVAL;
147 147
148 transport_len = ntohs(ipv6_hdr(skb)->payload_len); 148 transport_len = ntohs(ipv6_hdr(skb)->payload_len);
149 transport_len -= skb_transport_offset(skb) - sizeof(struct ipv6hdr); 149 transport_len -= skb_transport_offset(skb) - sizeof(struct ipv6hdr);
150 150
151 skb_get(skb);
152 skb_chk = skb_checksum_trimmed(skb, transport_len, 151 skb_chk = skb_checksum_trimmed(skb, transport_len,
153 ipv6_mc_validate_checksum); 152 ipv6_mc_validate_checksum);
154 if (!skb_chk) 153 if (!skb_chk)
155 return -EINVAL; 154 goto err;
156 155
157 if (!pskb_may_pull(skb_chk, len)) { 156 if (!pskb_may_pull(skb_chk, len))
158 kfree_skb(skb_chk); 157 goto err;
159 return -EINVAL;
160 }
161 158
162 ret = ipv6_mc_check_mld_msg(skb_chk); 159 ret = ipv6_mc_check_mld_msg(skb_chk);
163 if (ret) { 160 if (ret)
164 kfree_skb(skb_chk); 161 goto err;
165 return ret;
166 }
167 162
168 if (skb_trimmed) 163 if (skb_trimmed)
169 *skb_trimmed = skb_chk; 164 *skb_trimmed = skb_chk;
170 else 165 /* free now unneeded clone */
166 else if (skb_chk != skb)
171 kfree_skb(skb_chk); 167 kfree_skb(skb_chk);
172 168
173 return 0; 169 ret = 0;
170
171err:
172 if (ret && skb_chk && skb_chk != skb)
173 kfree_skb(skb_chk);
174
175 return ret;
174} 176}
175 177
176/** 178/**
@@ -179,7 +181,7 @@ static int __ipv6_mc_check_mld(struct sk_buff *skb,
179 * @skb_trimmed: to store an skb pointer trimmed to IPv6 packet tail (optional) 181 * @skb_trimmed: to store an skb pointer trimmed to IPv6 packet tail (optional)
180 * 182 *
181 * Checks whether an IPv6 packet is a valid MLD packet. If so sets 183 * Checks whether an IPv6 packet is a valid MLD packet. If so sets
182 * skb network and transport headers accordingly and returns zero. 184 * skb transport header accordingly and returns zero.
183 * 185 *
184 * -EINVAL: A broken packet was detected, i.e. it violates some internet 186 * -EINVAL: A broken packet was detected, i.e. it violates some internet
185 * standard 187 * standard
@@ -194,7 +196,8 @@ static int __ipv6_mc_check_mld(struct sk_buff *skb,
194 * to leave the original skb and its full frame unchanged (which might be 196 * to leave the original skb and its full frame unchanged (which might be
195 * desirable for layer 2 frame jugglers). 197 * desirable for layer 2 frame jugglers).
196 * 198 *
197 * The caller needs to release a reference count from any returned skb_trimmed. 199 * Caller needs to set the skb network header and free any returned skb if it
200 * differs from the provided skb.
198 */ 201 */
199int ipv6_mc_check_mld(struct sk_buff *skb, struct sk_buff **skb_trimmed) 202int ipv6_mc_check_mld(struct sk_buff *skb, struct sk_buff **skb_trimmed)
200{ 203{
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 0a05b35a90fc..c53331cfed95 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1650,6 +1650,7 @@ int ndisc_rcv(struct sk_buff *skb)
1650static int ndisc_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) 1650static int ndisc_netdev_event(struct notifier_block *this, unsigned long event, void *ptr)
1651{ 1651{
1652 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 1652 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1653 struct netdev_notifier_change_info *change_info;
1653 struct net *net = dev_net(dev); 1654 struct net *net = dev_net(dev);
1654 struct inet6_dev *idev; 1655 struct inet6_dev *idev;
1655 1656
@@ -1664,6 +1665,11 @@ static int ndisc_netdev_event(struct notifier_block *this, unsigned long event,
1664 ndisc_send_unsol_na(dev); 1665 ndisc_send_unsol_na(dev);
1665 in6_dev_put(idev); 1666 in6_dev_put(idev);
1666 break; 1667 break;
1668 case NETDEV_CHANGE:
1669 change_info = ptr;
1670 if (change_info->flags_changed & IFF_NOARP)
1671 neigh_changeaddr(&nd_tbl, dev);
1672 break;
1667 case NETDEV_DOWN: 1673 case NETDEV_DOWN:
1668 neigh_ifdown(&nd_tbl, dev); 1674 neigh_ifdown(&nd_tbl, dev);
1669 fib6_run_gc(0, net, false); 1675 fib6_run_gc(0, net, false);
diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c
index 6edb7b106de7..ebbb754c2111 100644
--- a/net/ipv6/netfilter/ip6t_SYNPROXY.c
+++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c
@@ -37,12 +37,13 @@ synproxy_build_ip(struct sk_buff *skb, const struct in6_addr *saddr,
37} 37}
38 38
39static void 39static void
40synproxy_send_tcp(const struct sk_buff *skb, struct sk_buff *nskb, 40synproxy_send_tcp(const struct synproxy_net *snet,
41 const struct sk_buff *skb, struct sk_buff *nskb,
41 struct nf_conntrack *nfct, enum ip_conntrack_info ctinfo, 42 struct nf_conntrack *nfct, enum ip_conntrack_info ctinfo,
42 struct ipv6hdr *niph, struct tcphdr *nth, 43 struct ipv6hdr *niph, struct tcphdr *nth,
43 unsigned int tcp_hdr_size) 44 unsigned int tcp_hdr_size)
44{ 45{
45 struct net *net = nf_ct_net((struct nf_conn *)nfct); 46 struct net *net = nf_ct_net(snet->tmpl);
46 struct dst_entry *dst; 47 struct dst_entry *dst;
47 struct flowi6 fl6; 48 struct flowi6 fl6;
48 49
@@ -83,7 +84,8 @@ free_nskb:
83} 84}
84 85
85static void 86static void
86synproxy_send_client_synack(const struct sk_buff *skb, const struct tcphdr *th, 87synproxy_send_client_synack(const struct synproxy_net *snet,
88 const struct sk_buff *skb, const struct tcphdr *th,
87 const struct synproxy_options *opts) 89 const struct synproxy_options *opts)
88{ 90{
89 struct sk_buff *nskb; 91 struct sk_buff *nskb;
@@ -119,7 +121,7 @@ synproxy_send_client_synack(const struct sk_buff *skb, const struct tcphdr *th,
119 121
120 synproxy_build_options(nth, opts); 122 synproxy_build_options(nth, opts);
121 123
122 synproxy_send_tcp(skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY, 124 synproxy_send_tcp(snet, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
123 niph, nth, tcp_hdr_size); 125 niph, nth, tcp_hdr_size);
124} 126}
125 127
@@ -163,7 +165,7 @@ synproxy_send_server_syn(const struct synproxy_net *snet,
163 165
164 synproxy_build_options(nth, opts); 166 synproxy_build_options(nth, opts);
165 167
166 synproxy_send_tcp(skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW, 168 synproxy_send_tcp(snet, skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW,
167 niph, nth, tcp_hdr_size); 169 niph, nth, tcp_hdr_size);
168} 170}
169 171
@@ -203,7 +205,7 @@ synproxy_send_server_ack(const struct synproxy_net *snet,
203 205
204 synproxy_build_options(nth, opts); 206 synproxy_build_options(nth, opts);
205 207
206 synproxy_send_tcp(skb, nskb, NULL, 0, niph, nth, tcp_hdr_size); 208 synproxy_send_tcp(snet, skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
207} 209}
208 210
209static void 211static void
@@ -241,7 +243,8 @@ synproxy_send_client_ack(const struct synproxy_net *snet,
241 243
242 synproxy_build_options(nth, opts); 244 synproxy_build_options(nth, opts);
243 245
244 synproxy_send_tcp(skb, nskb, NULL, 0, niph, nth, tcp_hdr_size); 246 synproxy_send_tcp(snet, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
247 niph, nth, tcp_hdr_size);
245} 248}
246 249
247static bool 250static bool
@@ -301,7 +304,7 @@ synproxy_tg6(struct sk_buff *skb, const struct xt_action_param *par)
301 XT_SYNPROXY_OPT_SACK_PERM | 304 XT_SYNPROXY_OPT_SACK_PERM |
302 XT_SYNPROXY_OPT_ECN); 305 XT_SYNPROXY_OPT_ECN);
303 306
304 synproxy_send_client_synack(skb, th, &opts); 307 synproxy_send_client_synack(snet, skb, th, &opts);
305 return NF_DROP; 308 return NF_DROP;
306 309
307 } else if (th->ack && !(th->fin || th->rst || th->syn)) { 310 } else if (th->ack && !(th->fin || th->rst || th->syn)) {
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 6f187c8d8a1b..6d02498172c1 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -348,7 +348,7 @@ found:
348 fq->ecn |= ecn; 348 fq->ecn |= ecn;
349 if (payload_len > fq->q.max_size) 349 if (payload_len > fq->q.max_size)
350 fq->q.max_size = payload_len; 350 fq->q.max_size = payload_len;
351 add_frag_mem_limit(&fq->q, skb->truesize); 351 add_frag_mem_limit(fq->q.net, skb->truesize);
352 352
353 /* The first fragment. 353 /* The first fragment.
354 * nhoffset is obtained from the first fragment, of course. 354 * nhoffset is obtained from the first fragment, of course.
@@ -430,7 +430,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct net_device *dev)
430 clone->ip_summed = head->ip_summed; 430 clone->ip_summed = head->ip_summed;
431 431
432 NFCT_FRAG6_CB(clone)->orig = NULL; 432 NFCT_FRAG6_CB(clone)->orig = NULL;
433 add_frag_mem_limit(&fq->q, clone->truesize); 433 add_frag_mem_limit(fq->q.net, clone->truesize);
434 } 434 }
435 435
436 /* We have to remove fragment header from datagram and to relocate 436 /* We have to remove fragment header from datagram and to relocate
@@ -454,7 +454,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct net_device *dev)
454 head->csum = csum_add(head->csum, fp->csum); 454 head->csum = csum_add(head->csum, fp->csum);
455 head->truesize += fp->truesize; 455 head->truesize += fp->truesize;
456 } 456 }
457 sub_frag_mem_limit(&fq->q, head->truesize); 457 sub_frag_mem_limit(fq->q.net, head->truesize);
458 458
459 head->ignore_df = 1; 459 head->ignore_df = 1;
460 head->next = NULL; 460 head->next = NULL;
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 8ffa2c8cce77..f1159bb76e0a 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -144,7 +144,7 @@ void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq,
144 144
145 IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS); 145 IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
146 146
147 if (fq->q.flags & INET_FRAG_EVICTED) 147 if (inet_frag_evicting(&fq->q))
148 goto out_rcu_unlock; 148 goto out_rcu_unlock;
149 149
150 IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT); 150 IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
@@ -330,7 +330,7 @@ found:
330 fq->q.stamp = skb->tstamp; 330 fq->q.stamp = skb->tstamp;
331 fq->q.meat += skb->len; 331 fq->q.meat += skb->len;
332 fq->ecn |= ecn; 332 fq->ecn |= ecn;
333 add_frag_mem_limit(&fq->q, skb->truesize); 333 add_frag_mem_limit(fq->q.net, skb->truesize);
334 334
335 /* The first fragment. 335 /* The first fragment.
336 * nhoffset is obtained from the first fragment, of course. 336 * nhoffset is obtained from the first fragment, of course.
@@ -443,7 +443,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
443 head->len -= clone->len; 443 head->len -= clone->len;
444 clone->csum = 0; 444 clone->csum = 0;
445 clone->ip_summed = head->ip_summed; 445 clone->ip_summed = head->ip_summed;
446 add_frag_mem_limit(&fq->q, clone->truesize); 446 add_frag_mem_limit(fq->q.net, clone->truesize);
447 } 447 }
448 448
449 /* We have to remove fragment header from datagram and to relocate 449 /* We have to remove fragment header from datagram and to relocate
@@ -481,7 +481,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
481 } 481 }
482 fp = next; 482 fp = next;
483 } 483 }
484 sub_frag_mem_limit(&fq->q, sum_truesize); 484 sub_frag_mem_limit(fq->q.net, sum_truesize);
485 485
486 head->next = NULL; 486 head->next = NULL;
487 head->dev = dev; 487 head->dev = dev;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 1a1122a6bbf5..d15586490cec 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -318,8 +318,7 @@ static const struct rt6_info ip6_blk_hole_entry_template = {
318/* allocate dst with ip6_dst_ops */ 318/* allocate dst with ip6_dst_ops */
319static struct rt6_info *__ip6_dst_alloc(struct net *net, 319static struct rt6_info *__ip6_dst_alloc(struct net *net,
320 struct net_device *dev, 320 struct net_device *dev,
321 int flags, 321 int flags)
322 struct fib6_table *table)
323{ 322{
324 struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev, 323 struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
325 0, DST_OBSOLETE_FORCE_CHK, flags); 324 0, DST_OBSOLETE_FORCE_CHK, flags);
@@ -336,10 +335,9 @@ static struct rt6_info *__ip6_dst_alloc(struct net *net,
336 335
337static struct rt6_info *ip6_dst_alloc(struct net *net, 336static struct rt6_info *ip6_dst_alloc(struct net *net,
338 struct net_device *dev, 337 struct net_device *dev,
339 int flags, 338 int flags)
340 struct fib6_table *table)
341{ 339{
342 struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags, table); 340 struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags);
343 341
344 if (rt) { 342 if (rt) {
345 rt->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_ATOMIC); 343 rt->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_ATOMIC);
@@ -369,10 +367,7 @@ static void ip6_dst_destroy(struct dst_entry *dst)
369 struct inet6_dev *idev; 367 struct inet6_dev *idev;
370 368
371 dst_destroy_metrics_generic(dst); 369 dst_destroy_metrics_generic(dst);
372 370 free_percpu(rt->rt6i_pcpu);
373 if (rt->rt6i_pcpu)
374 free_percpu(rt->rt6i_pcpu);
375
376 rt6_uncached_list_del(rt); 371 rt6_uncached_list_del(rt);
377 372
378 idev = rt->rt6i_idev; 373 idev = rt->rt6i_idev;
@@ -953,8 +948,7 @@ static struct rt6_info *ip6_rt_cache_alloc(struct rt6_info *ort,
953 if (ort->rt6i_flags & (RTF_CACHE | RTF_PCPU)) 948 if (ort->rt6i_flags & (RTF_CACHE | RTF_PCPU))
954 ort = (struct rt6_info *)ort->dst.from; 949 ort = (struct rt6_info *)ort->dst.from;
955 950
956 rt = __ip6_dst_alloc(dev_net(ort->dst.dev), ort->dst.dev, 951 rt = __ip6_dst_alloc(dev_net(ort->dst.dev), ort->dst.dev, 0);
957 0, ort->rt6i_table);
958 952
959 if (!rt) 953 if (!rt)
960 return NULL; 954 return NULL;
@@ -986,8 +980,7 @@ static struct rt6_info *ip6_rt_pcpu_alloc(struct rt6_info *rt)
986 struct rt6_info *pcpu_rt; 980 struct rt6_info *pcpu_rt;
987 981
988 pcpu_rt = __ip6_dst_alloc(dev_net(rt->dst.dev), 982 pcpu_rt = __ip6_dst_alloc(dev_net(rt->dst.dev),
989 rt->dst.dev, rt->dst.flags, 983 rt->dst.dev, rt->dst.flags);
990 rt->rt6i_table);
991 984
992 if (!pcpu_rt) 985 if (!pcpu_rt)
993 return NULL; 986 return NULL;
@@ -1000,32 +993,53 @@ static struct rt6_info *ip6_rt_pcpu_alloc(struct rt6_info *rt)
1000/* It should be called with read_lock_bh(&tb6_lock) acquired */ 993/* It should be called with read_lock_bh(&tb6_lock) acquired */
1001static struct rt6_info *rt6_get_pcpu_route(struct rt6_info *rt) 994static struct rt6_info *rt6_get_pcpu_route(struct rt6_info *rt)
1002{ 995{
1003 struct rt6_info *pcpu_rt, *prev, **p; 996 struct rt6_info *pcpu_rt, **p;
1004 997
1005 p = this_cpu_ptr(rt->rt6i_pcpu); 998 p = this_cpu_ptr(rt->rt6i_pcpu);
1006 pcpu_rt = *p; 999 pcpu_rt = *p;
1007 1000
1008 if (pcpu_rt) 1001 if (pcpu_rt) {
1009 goto done; 1002 dst_hold(&pcpu_rt->dst);
1003 rt6_dst_from_metrics_check(pcpu_rt);
1004 }
1005 return pcpu_rt;
1006}
1007
1008static struct rt6_info *rt6_make_pcpu_route(struct rt6_info *rt)
1009{
1010 struct fib6_table *table = rt->rt6i_table;
1011 struct rt6_info *pcpu_rt, *prev, **p;
1010 1012
1011 pcpu_rt = ip6_rt_pcpu_alloc(rt); 1013 pcpu_rt = ip6_rt_pcpu_alloc(rt);
1012 if (!pcpu_rt) { 1014 if (!pcpu_rt) {
1013 struct net *net = dev_net(rt->dst.dev); 1015 struct net *net = dev_net(rt->dst.dev);
1014 1016
1015 pcpu_rt = net->ipv6.ip6_null_entry; 1017 dst_hold(&net->ipv6.ip6_null_entry->dst);
1016 goto done; 1018 return net->ipv6.ip6_null_entry;
1017 } 1019 }
1018 1020
1019 prev = cmpxchg(p, NULL, pcpu_rt); 1021 read_lock_bh(&table->tb6_lock);
1020 if (prev) { 1022 if (rt->rt6i_pcpu) {
1021 /* If someone did it before us, return prev instead */ 1023 p = this_cpu_ptr(rt->rt6i_pcpu);
1024 prev = cmpxchg(p, NULL, pcpu_rt);
1025 if (prev) {
1026 /* If someone did it before us, return prev instead */
1027 dst_destroy(&pcpu_rt->dst);
1028 pcpu_rt = prev;
1029 }
1030 } else {
1031 /* rt has been removed from the fib6 tree
1032 * before we have a chance to acquire the read_lock.
1033 * In this case, don't brother to create a pcpu rt
1034 * since rt is going away anyway. The next
1035 * dst_check() will trigger a re-lookup.
1036 */
1022 dst_destroy(&pcpu_rt->dst); 1037 dst_destroy(&pcpu_rt->dst);
1023 pcpu_rt = prev; 1038 pcpu_rt = rt;
1024 } 1039 }
1025
1026done:
1027 dst_hold(&pcpu_rt->dst); 1040 dst_hold(&pcpu_rt->dst);
1028 rt6_dst_from_metrics_check(pcpu_rt); 1041 rt6_dst_from_metrics_check(pcpu_rt);
1042 read_unlock_bh(&table->tb6_lock);
1029 return pcpu_rt; 1043 return pcpu_rt;
1030} 1044}
1031 1045
@@ -1100,9 +1114,22 @@ redo_rt6_select:
1100 rt->dst.lastuse = jiffies; 1114 rt->dst.lastuse = jiffies;
1101 rt->dst.__use++; 1115 rt->dst.__use++;
1102 pcpu_rt = rt6_get_pcpu_route(rt); 1116 pcpu_rt = rt6_get_pcpu_route(rt);
1103 read_unlock_bh(&table->tb6_lock); 1117
1118 if (pcpu_rt) {
1119 read_unlock_bh(&table->tb6_lock);
1120 } else {
1121 /* We have to do the read_unlock first
1122 * because rt6_make_pcpu_route() may trigger
1123 * ip6_dst_gc() which will take the write_lock.
1124 */
1125 dst_hold(&rt->dst);
1126 read_unlock_bh(&table->tb6_lock);
1127 pcpu_rt = rt6_make_pcpu_route(rt);
1128 dst_release(&rt->dst);
1129 }
1104 1130
1105 return pcpu_rt; 1131 return pcpu_rt;
1132
1106 } 1133 }
1107} 1134}
1108 1135
@@ -1558,7 +1585,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
1558 if (unlikely(!idev)) 1585 if (unlikely(!idev))
1559 return ERR_PTR(-ENODEV); 1586 return ERR_PTR(-ENODEV);
1560 1587
1561 rt = ip6_dst_alloc(net, dev, 0, NULL); 1588 rt = ip6_dst_alloc(net, dev, 0);
1562 if (unlikely(!rt)) { 1589 if (unlikely(!rt)) {
1563 in6_dev_put(idev); 1590 in6_dev_put(idev);
1564 dst = ERR_PTR(-ENOMEM); 1591 dst = ERR_PTR(-ENOMEM);
@@ -1745,7 +1772,8 @@ int ip6_route_add(struct fib6_config *cfg)
1745 if (!table) 1772 if (!table)
1746 goto out; 1773 goto out;
1747 1774
1748 rt = ip6_dst_alloc(net, NULL, (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT, table); 1775 rt = ip6_dst_alloc(net, NULL,
1776 (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT);
1749 1777
1750 if (!rt) { 1778 if (!rt) {
1751 err = -ENOMEM; 1779 err = -ENOMEM;
@@ -1834,6 +1862,7 @@ int ip6_route_add(struct fib6_config *cfg)
1834 int gwa_type; 1862 int gwa_type;
1835 1863
1836 gw_addr = &cfg->fc_gateway; 1864 gw_addr = &cfg->fc_gateway;
1865 gwa_type = ipv6_addr_type(gw_addr);
1837 1866
1838 /* if gw_addr is local we will fail to detect this in case 1867 /* if gw_addr is local we will fail to detect this in case
1839 * address is still TENTATIVE (DAD in progress). rt6_lookup() 1868 * address is still TENTATIVE (DAD in progress). rt6_lookup()
@@ -1841,11 +1870,12 @@ int ip6_route_add(struct fib6_config *cfg)
1841 * prefix route was assigned to, which might be non-loopback. 1870 * prefix route was assigned to, which might be non-loopback.
1842 */ 1871 */
1843 err = -EINVAL; 1872 err = -EINVAL;
1844 if (ipv6_chk_addr_and_flags(net, gw_addr, NULL, 0, 0)) 1873 if (ipv6_chk_addr_and_flags(net, gw_addr,
1874 gwa_type & IPV6_ADDR_LINKLOCAL ?
1875 dev : NULL, 0, 0))
1845 goto out; 1876 goto out;
1846 1877
1847 rt->rt6i_gateway = *gw_addr; 1878 rt->rt6i_gateway = *gw_addr;
1848 gwa_type = ipv6_addr_type(gw_addr);
1849 1879
1850 if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) { 1880 if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) {
1851 struct rt6_info *grt; 1881 struct rt6_info *grt;
@@ -2400,7 +2430,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
2400{ 2430{
2401 struct net *net = dev_net(idev->dev); 2431 struct net *net = dev_net(idev->dev);
2402 struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev, 2432 struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev,
2403 DST_NOCOUNT, NULL); 2433 DST_NOCOUNT);
2404 if (!rt) 2434 if (!rt)
2405 return ERR_PTR(-ENOMEM); 2435 return ERR_PTR(-ENOMEM);
2406 2436
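
The interesting part of the route.c changes is rt6_make_pcpu_route(): the per-cpu copy is built outside the read lock and then published with cmpxchg(p, NULL, pcpu_rt), and whichever CPU loses the race destroys its copy and uses the winner's. A stripped-down, single-threaded sketch of that install-once shape with C11 atomics; the table lock, refcounting and the rt6i_pcpu == NULL fallback are deliberately left out:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct route {
	int id;
};

/* Install a freshly built per-cpu copy only if the slot is still empty;
 * if another CPU won the race, throw ours away and use theirs. */
static struct route *install_pcpu(struct route *_Atomic *slot, int id)
{
	struct route *mine = malloc(sizeof(*mine));
	struct route *expected = NULL;

	if (!mine)
		return NULL;
	mine->id = id;

	if (atomic_compare_exchange_strong(slot, &expected, mine))
		return mine;		/* we published our copy */

	free(mine);			/* lost the race, reuse the winner */
	return expected;
}

int main(void)
{
	struct route *_Atomic slot = NULL;
	struct route *a = install_pcpu(&slot, 1);
	struct route *b = install_pcpu(&slot, 2);	/* sees the existing entry */

	printf("a=%d b=%d same=%d\n", a->id, b->id, a == b);
	free(a);
	return 0;
}
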
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 6748c4277aff..7a6cea5e4274 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -943,7 +943,7 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
943 &ipv6_hdr(skb)->daddr, tcp_v6_iif(skb)); 943 &ipv6_hdr(skb)->daddr, tcp_v6_iif(skb));
944 if (req) { 944 if (req) {
945 nsk = tcp_check_req(sk, skb, req, false); 945 nsk = tcp_check_req(sk, skb, req, false);
946 if (!nsk) 946 if (!nsk || nsk == sk)
947 reqsk_put(req); 947 reqsk_put(req);
948 return nsk; 948 return nsk;
949 } 949 }
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 8fd9febaa5ba..8dab4e569571 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -613,7 +613,7 @@ static int llc_wait_data(struct sock *sk, long timeo)
613 if (signal_pending(current)) 613 if (signal_pending(current))
614 break; 614 break;
615 rc = 0; 615 rc = 0;
616 if (sk_wait_data(sk, &timeo)) 616 if (sk_wait_data(sk, &timeo, NULL))
617 break; 617 break;
618 } 618 }
619 return rc; 619 return rc;
@@ -802,7 +802,7 @@ static int llc_ui_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
802 release_sock(sk); 802 release_sock(sk);
803 lock_sock(sk); 803 lock_sock(sk);
804 } else 804 } else
805 sk_wait_data(sk, &timeo); 805 sk_wait_data(sk, &timeo, NULL);
806 806
807 if ((flags & MSG_PEEK) && peek_seq != llc->copied_seq) { 807 if ((flags & MSG_PEEK) && peek_seq != llc->copied_seq) {
808 net_dbg_ratelimited("LLC(%s:%d): Application bug, race in MSG_PEEK\n", 808 net_dbg_ratelimited("LLC(%s:%d): Application bug, race in MSG_PEEK\n",
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 29236e832e44..c09c0131bfa2 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -723,6 +723,7 @@ void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata)
723 723
724 debugfs_remove_recursive(sdata->vif.debugfs_dir); 724 debugfs_remove_recursive(sdata->vif.debugfs_dir);
725 sdata->vif.debugfs_dir = NULL; 725 sdata->vif.debugfs_dir = NULL;
726 sdata->debugfs.subdir_stations = NULL;
726} 727}
727 728
728void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata) 729void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata)
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index ed1edac14372..553ac6dd4867 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1863,10 +1863,6 @@ void ieee80211_sdata_stop(struct ieee80211_sub_if_data *sdata)
1863 ieee80211_teardown_sdata(sdata); 1863 ieee80211_teardown_sdata(sdata);
1864} 1864}
1865 1865
1866/*
1867 * Remove all interfaces, may only be called at hardware unregistration
1868 * time because it doesn't do RCU-safe list removals.
1869 */
1870void ieee80211_remove_interfaces(struct ieee80211_local *local) 1866void ieee80211_remove_interfaces(struct ieee80211_local *local)
1871{ 1867{
1872 struct ieee80211_sub_if_data *sdata, *tmp; 1868 struct ieee80211_sub_if_data *sdata, *tmp;
@@ -1875,14 +1871,21 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local)
1875 1871
1876 ASSERT_RTNL(); 1872 ASSERT_RTNL();
1877 1873
1878 /* 1874 /* Before destroying the interfaces, make sure they're all stopped so
1879 * Close all AP_VLAN interfaces first, as otherwise they 1875 * that the hardware is stopped. Otherwise, the driver might still be
1880 * might be closed while the AP interface they belong to 1876 * iterating the interfaces during the shutdown, e.g. from a worker
1881 * is closed, causing unregister_netdevice_many() to crash. 1877 * or from RX processing or similar, and if it does so (using atomic
1878 * iteration) while we're manipulating the list, the iteration will
1879 * crash.
1880 *
1881 * After this, the hardware should be stopped and the driver should
1882 * have stopped all of its activities, so that we can do RCU-unaware
1883 * manipulations of the interface list below.
1882 */ 1884 */
1883 list_for_each_entry(sdata, &local->interfaces, list) 1885 cfg80211_shutdown_all_interfaces(local->hw.wiphy);
1884 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 1886
1885 dev_close(sdata->dev); 1887 WARN(local->open_count, "%s: open count remains %d\n",
1888 wiphy_name(local->hw.wiphy), local->open_count);
1886 1889
1887 mutex_lock(&local->iflist_mtx); 1890 mutex_lock(&local->iflist_mtx);
1888 list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) { 1891 list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) {
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 5438d13e2f00..3b59099413fb 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -306,7 +306,7 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
306 if (action == WLAN_SP_MESH_PEERING_CONFIRM) { 306 if (action == WLAN_SP_MESH_PEERING_CONFIRM) {
307 /* AID */ 307 /* AID */
308 pos = skb_put(skb, 2); 308 pos = skb_put(skb, 2);
309 put_unaligned_le16(plid, pos + 2); 309 put_unaligned_le16(plid, pos);
310 } 310 }
311 if (ieee80211_add_srates_ie(sdata, skb, true, band) || 311 if (ieee80211_add_srates_ie(sdata, skb, true, band) ||
312 ieee80211_add_ext_srates_ie(sdata, skb, true, band) || 312 ieee80211_add_ext_srates_ie(sdata, skb, true, band) ||
@@ -1122,6 +1122,9 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata,
1122 WLAN_SP_MESH_PEERING_CONFIRM) { 1122 WLAN_SP_MESH_PEERING_CONFIRM) {
1123 baseaddr += 4; 1123 baseaddr += 4;
1124 baselen += 4; 1124 baselen += 4;
1125
1126 if (baselen > len)
1127 return;
1125 } 1128 }
1126 ieee802_11_parse_elems(baseaddr, len - baselen, true, &elems); 1129 ieee802_11_parse_elems(baseaddr, len - baselen, true, &elems);
1127 mesh_process_plink_frame(sdata, mgmt, &elems); 1130 mesh_process_plink_frame(sdata, mgmt, &elems);
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index 06b60980c62c..b676b9fa707b 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -76,6 +76,22 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
76 if (sdata->vif.type != NL80211_IFTYPE_STATION) 76 if (sdata->vif.type != NL80211_IFTYPE_STATION)
77 continue; 77 continue;
78 ieee80211_mgd_quiesce(sdata); 78 ieee80211_mgd_quiesce(sdata);
79 /* If suspended during TX in progress, and wowlan
80 * is enabled (connection will be active) there
81 * can be a race where the driver is put out
82 * of power-save due to TX and during suspend
83 * dynamic_ps_timer is cancelled and TX packet
84 * is flushed, leaving the driver in ACTIVE even
85 * after resuming until dynamic_ps_timer puts
86 * driver back in DOZE.
87 */
88 if (sdata->u.mgd.associated &&
89 sdata->u.mgd.powersave &&
90 !(local->hw.conf.flags & IEEE80211_CONF_PS)) {
91 local->hw.conf.flags |= IEEE80211_CONF_PS;
92 ieee80211_hw_config(local,
93 IEEE80211_CONF_CHANGE_PS);
94 }
79 } 95 }
80 96
81 err = drv_suspend(local, wowlan); 97 err = drv_suspend(local, wowlan);
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index 247552a7f6c2..3ece7d1034c8 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -92,14 +92,15 @@ int minstrel_get_tp_avg(struct minstrel_rate *mr, int prob_ewma)
92static inline void 92static inline void
93minstrel_sort_best_tp_rates(struct minstrel_sta_info *mi, int i, u8 *tp_list) 93minstrel_sort_best_tp_rates(struct minstrel_sta_info *mi, int i, u8 *tp_list)
94{ 94{
95 int j = MAX_THR_RATES; 95 int j;
96 struct minstrel_rate_stats *tmp_mrs = &mi->r[j - 1].stats; 96 struct minstrel_rate_stats *tmp_mrs;
97 struct minstrel_rate_stats *cur_mrs = &mi->r[i].stats; 97 struct minstrel_rate_stats *cur_mrs = &mi->r[i].stats;
98 98
99 while (j > 0 && (minstrel_get_tp_avg(&mi->r[i], cur_mrs->prob_ewma) > 99 for (j = MAX_THR_RATES; j > 0; --j) {
100 minstrel_get_tp_avg(&mi->r[tp_list[j - 1]], tmp_mrs->prob_ewma))) {
101 j--;
102 tmp_mrs = &mi->r[tp_list[j - 1]].stats; 100 tmp_mrs = &mi->r[tp_list[j - 1]].stats;
101 if (minstrel_get_tp_avg(&mi->r[i], cur_mrs->prob_ewma) <=
102 minstrel_get_tp_avg(&mi->r[tp_list[j - 1]], tmp_mrs->prob_ewma))
103 break;
103 } 104 }
104 105
105 if (j < MAX_THR_RATES - 1) 106 if (j < MAX_THR_RATES - 1)
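
The minstrel hunk above turns the insertion scan into a plain for loop that walks the best-throughput list from the bottom and breaks at the first entry the candidate does not beat. A small stand-alone model of that loop over an integer throughput array; the EWMA probability weighting of the real minstrel_get_tp_avg() is ignored here:

#include <stdio.h>

#define MAX_THR_RATES 4

/* Scan the top-rates list from the bottom, stop at the first entry the
 * candidate does not beat, then shift the tail down and insert.  tp[] holds
 * per-rate throughput, tp_list[] the current best indices in descending order. */
static void sort_best_tp(const int *tp, int cand, int *tp_list)
{
	int j, k;

	for (j = MAX_THR_RATES; j > 0; --j) {
		if (tp[cand] <= tp[tp_list[j - 1]])
			break;
	}

	for (k = MAX_THR_RATES - 1; k > j; --k)
		tp_list[k] = tp_list[k - 1];
	if (j < MAX_THR_RATES)
		tp_list[j] = cand;
}

int main(void)
{
	int tp[] = { 50, 90, 10, 70, 60 };		/* throughput per rate */
	int tp_list[MAX_THR_RATES] = { 1, 3, 4, 0 };	/* 90, 70, 60, 50 */
	int i;

	sort_best_tp(tp, 2, tp_list);	/* tp 10: beats nothing, list unchanged */
	tp[2] = 80;
	sort_best_tp(tp, 2, tp_list);	/* tp 80: slots in at second place */

	for (i = 0; i < MAX_THR_RATES; i++)
		printf("%d ", tp_list[i]);
	printf("\n");			/* prints: 1 2 3 4 */
	return 0;
}
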
diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c
index ad31b2dab4f5..8db6e2994bbc 100644
--- a/net/mac80211/tdls.c
+++ b/net/mac80211/tdls.c
@@ -60,6 +60,7 @@ ieee80211_tdls_add_subband(struct ieee80211_sub_if_data *sdata,
60 struct ieee80211_channel *ch; 60 struct ieee80211_channel *ch;
61 struct cfg80211_chan_def chandef; 61 struct cfg80211_chan_def chandef;
62 int i, subband_start; 62 int i, subband_start;
63 struct wiphy *wiphy = sdata->local->hw.wiphy;
63 64
64 for (i = start; i <= end; i += spacing) { 65 for (i = start; i <= end; i += spacing) {
65 if (!ch_cnt) 66 if (!ch_cnt)
@@ -70,9 +71,8 @@ ieee80211_tdls_add_subband(struct ieee80211_sub_if_data *sdata,
70 /* we will be active on the channel */ 71 /* we will be active on the channel */
71 cfg80211_chandef_create(&chandef, ch, 72 cfg80211_chandef_create(&chandef, ch,
72 NL80211_CHAN_NO_HT); 73 NL80211_CHAN_NO_HT);
73 if (cfg80211_reg_can_beacon(sdata->local->hw.wiphy, 74 if (cfg80211_reg_can_beacon_relax(wiphy, &chandef,
74 &chandef, 75 sdata->wdev.iftype)) {
75 sdata->wdev.iftype)) {
76 ch_cnt++; 76 ch_cnt++;
77 /* 77 /*
78 * check if the next channel is also part of 78 * check if the next channel is also part of
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 8410bb3bf5e8..b8233505bf9f 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1117,7 +1117,9 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
1117 queued = true; 1117 queued = true;
1118 info->control.vif = &tx->sdata->vif; 1118 info->control.vif = &tx->sdata->vif;
1119 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; 1119 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
1120 info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS; 1120 info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS |
1121 IEEE80211_TX_CTL_NO_PS_BUFFER |
1122 IEEE80211_TX_STATUS_EOSP;
1121 __skb_queue_tail(&tid_tx->pending, skb); 1123 __skb_queue_tail(&tid_tx->pending, skb);
1122 if (skb_queue_len(&tid_tx->pending) > STA_MAX_TX_BUFFER) 1124 if (skb_queue_len(&tid_tx->pending) > STA_MAX_TX_BUFFER)
1123 purge_skb = __skb_dequeue(&tid_tx->pending); 1125 purge_skb = __skb_dequeue(&tid_tx->pending);
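
In the tx.c hunk, the requeue path now masks with ~IEEE80211_TX_TEMPORARY_FLAGS | IEEE80211_TX_CTL_NO_PS_BUFFER | IEEE80211_TX_STATUS_EOSP. Because ~ binds tighter than |, that expression clears the temporary flags while leaving the two OR'd-in bits alone. A minimal, runnable illustration of the same bit arithmetic with made-up flag names:

#include <stdio.h>

#define F_A (1u << 0)
#define F_B (1u << 1)
#define F_C (1u << 2)
#define F_TEMPORARY (F_A | F_B | F_C)	/* flags normally cleared on requeue */

int main(void)
{
	unsigned int flags = F_A | F_B | F_C;

	/* ~ binds tighter than |, so the mask is "everything except the
	 * temporary flags, with B and C put back in": clear the temporary
	 * flags but keep B and C, analogous to what the tx.c hunk does for
	 * NO_PS_BUFFER and STATUS_EOSP. */
	flags &= ~F_TEMPORARY | F_B | F_C;

	printf("%#x\n", flags);		/* 0x6: A cleared, B and C kept */
	return 0;
}
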
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 5d2b806a862e..38fbc194b9cb 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -319,7 +319,13 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
319 * return *ignored=0 i.e. ICMP and NF_DROP 319 * return *ignored=0 i.e. ICMP and NF_DROP
320 */ 320 */
321 sched = rcu_dereference(svc->scheduler); 321 sched = rcu_dereference(svc->scheduler);
322 dest = sched->schedule(svc, skb, iph); 322 if (sched) {
323 /* read svc->sched_data after svc->scheduler */
324 smp_rmb();
325 dest = sched->schedule(svc, skb, iph);
326 } else {
327 dest = NULL;
328 }
323 if (!dest) { 329 if (!dest) {
324 IP_VS_DBG(1, "p-schedule: no dest found.\n"); 330 IP_VS_DBG(1, "p-schedule: no dest found.\n");
325 kfree(param.pe_data); 331 kfree(param.pe_data);
@@ -467,7 +473,13 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
467 } 473 }
468 474
469 sched = rcu_dereference(svc->scheduler); 475 sched = rcu_dereference(svc->scheduler);
470 dest = sched->schedule(svc, skb, iph); 476 if (sched) {
477 /* read svc->sched_data after svc->scheduler */
478 smp_rmb();
479 dest = sched->schedule(svc, skb, iph);
480 } else {
481 dest = NULL;
482 }
471 if (dest == NULL) { 483 if (dest == NULL) {
472 IP_VS_DBG(1, "Schedule: no dest found.\n"); 484 IP_VS_DBG(1, "Schedule: no dest found.\n");
473 return NULL; 485 return NULL;
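
Both ip_vs_core.c hunks now tolerate a NULL svc->scheduler and add an smp_rmb() so that svc->sched_data is read only after the scheduler pointer has been observed; the control path publishes sched_data before setting the pointer. The sketch below is a userspace analogue of that publish/consume ordering using C11 acquire/release atomics instead of RCU and smp_rmb(); the struct layout, names and rr_schedule() are invented for illustration.

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct scheduler {
	const char *name;
	int (*schedule)(void *sched_data);
};

struct service {
	void *sched_data;                       /* written before publishing */
	_Atomic(struct scheduler *) scheduler;
};

static int rr_schedule(void *sched_data)
{
	int *counter = sched_data;
	return (*counter)++;
}

/* Writer: set up sched_data first, then publish the scheduler pointer with
 * release semantics (the kernel pairs the publication with smp_rmb() on the
 * read side). */
static void bind_scheduler(struct service *svc, struct scheduler *s, void *data)
{
	svc->sched_data = data;
	atomic_store_explicit(&svc->scheduler, s, memory_order_release);
}

/* Reader: tolerate a NULL scheduler (service temporarily unbound) and touch
 * sched_data only after the pointer has been observed. */
static int schedule_one(struct service *svc)
{
	struct scheduler *s =
		atomic_load_explicit(&svc->scheduler, memory_order_acquire);

	if (!s)
		return -1;		/* no destination, like dest == NULL */
	return s->schedule(svc->sched_data);
}

int main(void)
{
	static int counter;
	struct scheduler rr = { "rr", rr_schedule };
	struct service svc = { 0 };

	printf("%d\n", schedule_one(&svc));	/* -1: not bound yet */
	bind_scheduler(&svc, &rr, &counter);
	printf("%d\n", schedule_one(&svc));	/* 0 */
	return 0;
}
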
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 285eae3a1454..24c554201a76 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -842,15 +842,16 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
842 __ip_vs_dst_cache_reset(dest); 842 __ip_vs_dst_cache_reset(dest);
843 spin_unlock_bh(&dest->dst_lock); 843 spin_unlock_bh(&dest->dst_lock);
844 844
845 sched = rcu_dereference_protected(svc->scheduler, 1);
846 if (add) { 845 if (add) {
847 ip_vs_start_estimator(svc->net, &dest->stats); 846 ip_vs_start_estimator(svc->net, &dest->stats);
848 list_add_rcu(&dest->n_list, &svc->destinations); 847 list_add_rcu(&dest->n_list, &svc->destinations);
849 svc->num_dests++; 848 svc->num_dests++;
850 if (sched->add_dest) 849 sched = rcu_dereference_protected(svc->scheduler, 1);
850 if (sched && sched->add_dest)
851 sched->add_dest(svc, dest); 851 sched->add_dest(svc, dest);
852 } else { 852 } else {
853 if (sched->upd_dest) 853 sched = rcu_dereference_protected(svc->scheduler, 1);
854 if (sched && sched->upd_dest)
854 sched->upd_dest(svc, dest); 855 sched->upd_dest(svc, dest);
855 } 856 }
856} 857}
@@ -1084,7 +1085,7 @@ static void __ip_vs_unlink_dest(struct ip_vs_service *svc,
1084 struct ip_vs_scheduler *sched; 1085 struct ip_vs_scheduler *sched;
1085 1086
1086 sched = rcu_dereference_protected(svc->scheduler, 1); 1087 sched = rcu_dereference_protected(svc->scheduler, 1);
1087 if (sched->del_dest) 1088 if (sched && sched->del_dest)
1088 sched->del_dest(svc, dest); 1089 sched->del_dest(svc, dest);
1089 } 1090 }
1090} 1091}
@@ -1175,11 +1176,14 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
1175 ip_vs_use_count_inc(); 1176 ip_vs_use_count_inc();
1176 1177
1177 /* Lookup the scheduler by 'u->sched_name' */ 1178 /* Lookup the scheduler by 'u->sched_name' */
1178 sched = ip_vs_scheduler_get(u->sched_name); 1179 if (strcmp(u->sched_name, "none")) {
1179 if (sched == NULL) { 1180 sched = ip_vs_scheduler_get(u->sched_name);
1180 pr_info("Scheduler module ip_vs_%s not found\n", u->sched_name); 1181 if (!sched) {
1181 ret = -ENOENT; 1182 pr_info("Scheduler module ip_vs_%s not found\n",
1182 goto out_err; 1183 u->sched_name);
1184 ret = -ENOENT;
1185 goto out_err;
1186 }
1183 } 1187 }
1184 1188
1185 if (u->pe_name && *u->pe_name) { 1189 if (u->pe_name && *u->pe_name) {
@@ -1240,10 +1244,12 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
1240 spin_lock_init(&svc->stats.lock); 1244 spin_lock_init(&svc->stats.lock);
1241 1245
1242 /* Bind the scheduler */ 1246 /* Bind the scheduler */
1243 ret = ip_vs_bind_scheduler(svc, sched); 1247 if (sched) {
1244 if (ret) 1248 ret = ip_vs_bind_scheduler(svc, sched);
1245 goto out_err; 1249 if (ret)
1246 sched = NULL; 1250 goto out_err;
1251 sched = NULL;
1252 }
1247 1253
1248 /* Bind the ct retriever */ 1254 /* Bind the ct retriever */
1249 RCU_INIT_POINTER(svc->pe, pe); 1255 RCU_INIT_POINTER(svc->pe, pe);
@@ -1291,17 +1297,20 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
1291static int 1297static int
1292ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user_kern *u) 1298ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user_kern *u)
1293{ 1299{
1294 struct ip_vs_scheduler *sched, *old_sched; 1300 struct ip_vs_scheduler *sched = NULL, *old_sched;
1295 struct ip_vs_pe *pe = NULL, *old_pe = NULL; 1301 struct ip_vs_pe *pe = NULL, *old_pe = NULL;
1296 int ret = 0; 1302 int ret = 0;
1297 1303
1298 /* 1304 /*
1299 * Lookup the scheduler, by 'u->sched_name' 1305 * Lookup the scheduler, by 'u->sched_name'
1300 */ 1306 */
1301 sched = ip_vs_scheduler_get(u->sched_name); 1307 if (strcmp(u->sched_name, "none")) {
1302 if (sched == NULL) { 1308 sched = ip_vs_scheduler_get(u->sched_name);
1303 pr_info("Scheduler module ip_vs_%s not found\n", u->sched_name); 1309 if (!sched) {
1304 return -ENOENT; 1310 pr_info("Scheduler module ip_vs_%s not found\n",
1311 u->sched_name);
1312 return -ENOENT;
1313 }
1305 } 1314 }
1306 old_sched = sched; 1315 old_sched = sched;
1307 1316
@@ -1329,14 +1338,20 @@ ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user_kern *u)
1329 1338
1330 old_sched = rcu_dereference_protected(svc->scheduler, 1); 1339 old_sched = rcu_dereference_protected(svc->scheduler, 1);
1331 if (sched != old_sched) { 1340 if (sched != old_sched) {
1341 if (old_sched) {
1342 ip_vs_unbind_scheduler(svc, old_sched);
1343 RCU_INIT_POINTER(svc->scheduler, NULL);
1344 /* Wait all svc->sched_data users */
1345 synchronize_rcu();
1346 }
1332 /* Bind the new scheduler */ 1347 /* Bind the new scheduler */
1333 ret = ip_vs_bind_scheduler(svc, sched); 1348 if (sched) {
1334 if (ret) { 1349 ret = ip_vs_bind_scheduler(svc, sched);
1335 old_sched = sched; 1350 if (ret) {
1336 goto out; 1351 ip_vs_scheduler_put(sched);
1352 goto out;
1353 }
1337 } 1354 }
1338 /* Unbind the old scheduler on success */
1339 ip_vs_unbind_scheduler(svc, old_sched);
1340 } 1355 }
1341 1356
1342 /* 1357 /*
@@ -1982,6 +1997,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
1982 const struct ip_vs_iter *iter = seq->private; 1997 const struct ip_vs_iter *iter = seq->private;
1983 const struct ip_vs_dest *dest; 1998 const struct ip_vs_dest *dest;
1984 struct ip_vs_scheduler *sched = rcu_dereference(svc->scheduler); 1999 struct ip_vs_scheduler *sched = rcu_dereference(svc->scheduler);
2000 char *sched_name = sched ? sched->name : "none";
1985 2001
1986 if (iter->table == ip_vs_svc_table) { 2002 if (iter->table == ip_vs_svc_table) {
1987#ifdef CONFIG_IP_VS_IPV6 2003#ifdef CONFIG_IP_VS_IPV6
@@ -1990,18 +2006,18 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
1990 ip_vs_proto_name(svc->protocol), 2006 ip_vs_proto_name(svc->protocol),
1991 &svc->addr.in6, 2007 &svc->addr.in6,
1992 ntohs(svc->port), 2008 ntohs(svc->port),
1993 sched->name); 2009 sched_name);
1994 else 2010 else
1995#endif 2011#endif
1996 seq_printf(seq, "%s %08X:%04X %s %s ", 2012 seq_printf(seq, "%s %08X:%04X %s %s ",
1997 ip_vs_proto_name(svc->protocol), 2013 ip_vs_proto_name(svc->protocol),
1998 ntohl(svc->addr.ip), 2014 ntohl(svc->addr.ip),
1999 ntohs(svc->port), 2015 ntohs(svc->port),
2000 sched->name, 2016 sched_name,
2001 (svc->flags & IP_VS_SVC_F_ONEPACKET)?"ops ":""); 2017 (svc->flags & IP_VS_SVC_F_ONEPACKET)?"ops ":"");
2002 } else { 2018 } else {
2003 seq_printf(seq, "FWM %08X %s %s", 2019 seq_printf(seq, "FWM %08X %s %s",
2004 svc->fwmark, sched->name, 2020 svc->fwmark, sched_name,
2005 (svc->flags & IP_VS_SVC_F_ONEPACKET)?"ops ":""); 2021 (svc->flags & IP_VS_SVC_F_ONEPACKET)?"ops ":"");
2006 } 2022 }
2007 2023
@@ -2427,13 +2443,15 @@ ip_vs_copy_service(struct ip_vs_service_entry *dst, struct ip_vs_service *src)
2427{ 2443{
2428 struct ip_vs_scheduler *sched; 2444 struct ip_vs_scheduler *sched;
2429 struct ip_vs_kstats kstats; 2445 struct ip_vs_kstats kstats;
2446 char *sched_name;
2430 2447
2431 sched = rcu_dereference_protected(src->scheduler, 1); 2448 sched = rcu_dereference_protected(src->scheduler, 1);
2449 sched_name = sched ? sched->name : "none";
2432 dst->protocol = src->protocol; 2450 dst->protocol = src->protocol;
2433 dst->addr = src->addr.ip; 2451 dst->addr = src->addr.ip;
2434 dst->port = src->port; 2452 dst->port = src->port;
2435 dst->fwmark = src->fwmark; 2453 dst->fwmark = src->fwmark;
2436 strlcpy(dst->sched_name, sched->name, sizeof(dst->sched_name)); 2454 strlcpy(dst->sched_name, sched_name, sizeof(dst->sched_name));
2437 dst->flags = src->flags; 2455 dst->flags = src->flags;
2438 dst->timeout = src->timeout / HZ; 2456 dst->timeout = src->timeout / HZ;
2439 dst->netmask = src->netmask; 2457 dst->netmask = src->netmask;
@@ -2892,6 +2910,7 @@ static int ip_vs_genl_fill_service(struct sk_buff *skb,
2892 struct ip_vs_flags flags = { .flags = svc->flags, 2910 struct ip_vs_flags flags = { .flags = svc->flags,
2893 .mask = ~0 }; 2911 .mask = ~0 };
2894 struct ip_vs_kstats kstats; 2912 struct ip_vs_kstats kstats;
2913 char *sched_name;
2895 2914
2896 nl_service = nla_nest_start(skb, IPVS_CMD_ATTR_SERVICE); 2915 nl_service = nla_nest_start(skb, IPVS_CMD_ATTR_SERVICE);
2897 if (!nl_service) 2916 if (!nl_service)
@@ -2910,8 +2929,9 @@ static int ip_vs_genl_fill_service(struct sk_buff *skb,
2910 } 2929 }
2911 2930
2912 sched = rcu_dereference_protected(svc->scheduler, 1); 2931 sched = rcu_dereference_protected(svc->scheduler, 1);
2932 sched_name = sched ? sched->name : "none";
2913 pe = rcu_dereference_protected(svc->pe, 1); 2933 pe = rcu_dereference_protected(svc->pe, 1);
2914 if (nla_put_string(skb, IPVS_SVC_ATTR_SCHED_NAME, sched->name) || 2934 if (nla_put_string(skb, IPVS_SVC_ATTR_SCHED_NAME, sched_name) ||
2915 (pe && nla_put_string(skb, IPVS_SVC_ATTR_PE_NAME, pe->name)) || 2935 (pe && nla_put_string(skb, IPVS_SVC_ATTR_PE_NAME, pe->name)) ||
2916 nla_put(skb, IPVS_SVC_ATTR_FLAGS, sizeof(flags), &flags) || 2936 nla_put(skb, IPVS_SVC_ATTR_FLAGS, sizeof(flags), &flags) ||
2917 nla_put_u32(skb, IPVS_SVC_ATTR_TIMEOUT, svc->timeout / HZ) || 2937 nla_put_u32(skb, IPVS_SVC_ATTR_TIMEOUT, svc->timeout / HZ) ||
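
ip_vs_ctl.c now treats the scheduler name "none" as "no scheduler bound": the lookup is skipped, every use of svc->scheduler is NULL-checked, and the reporting paths fall back to the string "none". A small userspace sketch of that optional-lookup convention; the scheduler table and names here are made up for illustration.

#include <errno.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct scheduler { const char *name; };

static struct scheduler known[] = { { "rr" }, { "wlc" }, { "sh" } };

static struct scheduler *scheduler_get(const char *name)
{
	for (size_t i = 0; i < sizeof(known) / sizeof(known[0]); i++)
		if (!strcmp(known[i].name, name))
			return &known[i];
	return NULL;
}

/* The literal scheduler name "none" leaves *schedp NULL and succeeds;
 * anything else must resolve or the request fails with -ENOENT, mirroring
 * the ip_vs_add_service()/ip_vs_edit_service() hunks. */
static int lookup_optional_scheduler(const char *name, struct scheduler **schedp)
{
	*schedp = NULL;
	if (strcmp(name, "none")) {
		*schedp = scheduler_get(name);
		if (!*schedp) {
			fprintf(stderr, "scheduler %s not found\n", name);
			return -ENOENT;
		}
	}
	return 0;
}

int main(void)
{
	struct scheduler *sched;

	lookup_optional_scheduler("rr", &sched);
	printf("%s\n", sched ? sched->name : "none");           /* rr   */
	lookup_optional_scheduler("none", &sched);
	printf("%s\n", sched ? sched->name : "none");           /* none */
	printf("%d\n", lookup_optional_scheduler("bogus", &sched)); /* -2 */
	return 0;
}
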
diff --git a/net/netfilter/ipvs/ip_vs_sched.c b/net/netfilter/ipvs/ip_vs_sched.c
index 199760c71f39..7e8141647943 100644
--- a/net/netfilter/ipvs/ip_vs_sched.c
+++ b/net/netfilter/ipvs/ip_vs_sched.c
@@ -74,7 +74,7 @@ void ip_vs_unbind_scheduler(struct ip_vs_service *svc,
74 74
75 if (sched->done_service) 75 if (sched->done_service)
76 sched->done_service(svc); 76 sched->done_service(svc);
77 /* svc->scheduler can not be set to NULL */ 77 /* svc->scheduler can be set to NULL only by caller */
78} 78}
79 79
80 80
@@ -147,21 +147,21 @@ void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler)
147 147
148void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg) 148void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg)
149{ 149{
150 struct ip_vs_scheduler *sched; 150 struct ip_vs_scheduler *sched = rcu_dereference(svc->scheduler);
151 char *sched_name = sched ? sched->name : "none";
151 152
152 sched = rcu_dereference(svc->scheduler);
153 if (svc->fwmark) { 153 if (svc->fwmark) {
154 IP_VS_ERR_RL("%s: FWM %u 0x%08X - %s\n", 154 IP_VS_ERR_RL("%s: FWM %u 0x%08X - %s\n",
155 sched->name, svc->fwmark, svc->fwmark, msg); 155 sched_name, svc->fwmark, svc->fwmark, msg);
156#ifdef CONFIG_IP_VS_IPV6 156#ifdef CONFIG_IP_VS_IPV6
157 } else if (svc->af == AF_INET6) { 157 } else if (svc->af == AF_INET6) {
158 IP_VS_ERR_RL("%s: %s [%pI6c]:%d - %s\n", 158 IP_VS_ERR_RL("%s: %s [%pI6c]:%d - %s\n",
159 sched->name, ip_vs_proto_name(svc->protocol), 159 sched_name, ip_vs_proto_name(svc->protocol),
160 &svc->addr.in6, ntohs(svc->port), msg); 160 &svc->addr.in6, ntohs(svc->port), msg);
161#endif 161#endif
162 } else { 162 } else {
163 IP_VS_ERR_RL("%s: %s %pI4:%d - %s\n", 163 IP_VS_ERR_RL("%s: %s %pI4:%d - %s\n",
164 sched->name, ip_vs_proto_name(svc->protocol), 164 sched_name, ip_vs_proto_name(svc->protocol),
165 &svc->addr.ip, ntohs(svc->port), msg); 165 &svc->addr.ip, ntohs(svc->port), msg);
166 } 166 }
167} 167}
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index b08ba9538d12..d99ad93eb855 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -612,7 +612,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
612 pkts = atomic_add_return(1, &cp->in_pkts); 612 pkts = atomic_add_return(1, &cp->in_pkts);
613 else 613 else
614 pkts = sysctl_sync_threshold(ipvs); 614 pkts = sysctl_sync_threshold(ipvs);
615 ip_vs_sync_conn(net, cp->control, pkts); 615 ip_vs_sync_conn(net, cp, pkts);
616 } 616 }
617} 617}
618 618
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index bf66a8657a5f..258a0b0e82a2 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -130,7 +130,6 @@ static struct rtable *do_output_route4(struct net *net, __be32 daddr,
130 130
131 memset(&fl4, 0, sizeof(fl4)); 131 memset(&fl4, 0, sizeof(fl4));
132 fl4.daddr = daddr; 132 fl4.daddr = daddr;
133 fl4.saddr = (rt_mode & IP_VS_RT_MODE_CONNECT) ? *saddr : 0;
134 fl4.flowi4_flags = (rt_mode & IP_VS_RT_MODE_KNOWN_NH) ? 133 fl4.flowi4_flags = (rt_mode & IP_VS_RT_MODE_KNOWN_NH) ?
135 FLOWI_FLAG_KNOWN_NH : 0; 134 FLOWI_FLAG_KNOWN_NH : 0;
136 135
@@ -505,6 +504,13 @@ err_put:
505 return -1; 504 return -1;
506 505
507err_unreach: 506err_unreach:
507 /* The ip6_link_failure function requires the dev field to be set
508 * in order to get the net (further for the sake of fwmark
509 * reflection).
510 */
511 if (!skb->dev)
512 skb->dev = skb_dst(skb)->dev;
513
508 dst_link_failure(skb); 514 dst_link_failure(skb);
509 return -1; 515 return -1;
510} 516}
@@ -523,10 +529,27 @@ static inline int ip_vs_tunnel_xmit_prepare(struct sk_buff *skb,
523 if (ret == NF_ACCEPT) { 529 if (ret == NF_ACCEPT) {
524 nf_reset(skb); 530 nf_reset(skb);
525 skb_forward_csum(skb); 531 skb_forward_csum(skb);
532 if (!skb->sk)
533 skb_sender_cpu_clear(skb);
526 } 534 }
527 return ret; 535 return ret;
528} 536}
529 537
538/* In the event of a remote destination, it's possible that we would have
539 * matches against an old socket (particularly a TIME-WAIT socket). This
540 * causes havoc down the line (ip_local_out et. al. expect regular sockets
541 * and invalid memory accesses will happen) so simply drop the association
542 * in this case.
543*/
544static inline void ip_vs_drop_early_demux_sk(struct sk_buff *skb)
545{
546 /* If dev is set, the packet came from the LOCAL_IN callback and
547 * not from a local TCP socket.
548 */
549 if (skb->dev)
550 skb_orphan(skb);
551}
552
530/* return NF_STOLEN (sent) or NF_ACCEPT if local=1 (not sent) */ 553/* return NF_STOLEN (sent) or NF_ACCEPT if local=1 (not sent) */
531static inline int ip_vs_nat_send_or_cont(int pf, struct sk_buff *skb, 554static inline int ip_vs_nat_send_or_cont(int pf, struct sk_buff *skb,
532 struct ip_vs_conn *cp, int local) 555 struct ip_vs_conn *cp, int local)
@@ -538,12 +561,23 @@ static inline int ip_vs_nat_send_or_cont(int pf, struct sk_buff *skb,
538 ip_vs_notrack(skb); 561 ip_vs_notrack(skb);
539 else 562 else
540 ip_vs_update_conntrack(skb, cp, 1); 563 ip_vs_update_conntrack(skb, cp, 1);
564
565 /* Remove the early_demux association unless it's bound for the
566 * exact same port and address on this host after translation.
567 */
568 if (!local || cp->vport != cp->dport ||
569 !ip_vs_addr_equal(cp->af, &cp->vaddr, &cp->daddr))
570 ip_vs_drop_early_demux_sk(skb);
571
541 if (!local) { 572 if (!local) {
542 skb_forward_csum(skb); 573 skb_forward_csum(skb);
574 if (!skb->sk)
575 skb_sender_cpu_clear(skb);
543 NF_HOOK(pf, NF_INET_LOCAL_OUT, NULL, skb, 576 NF_HOOK(pf, NF_INET_LOCAL_OUT, NULL, skb,
544 NULL, skb_dst(skb)->dev, dst_output_sk); 577 NULL, skb_dst(skb)->dev, dst_output_sk);
545 } else 578 } else
546 ret = NF_ACCEPT; 579 ret = NF_ACCEPT;
580
547 return ret; 581 return ret;
548} 582}
549 583
@@ -557,7 +591,10 @@ static inline int ip_vs_send_or_cont(int pf, struct sk_buff *skb,
557 if (likely(!(cp->flags & IP_VS_CONN_F_NFCT))) 591 if (likely(!(cp->flags & IP_VS_CONN_F_NFCT)))
558 ip_vs_notrack(skb); 592 ip_vs_notrack(skb);
559 if (!local) { 593 if (!local) {
594 ip_vs_drop_early_demux_sk(skb);
560 skb_forward_csum(skb); 595 skb_forward_csum(skb);
596 if (!skb->sk)
597 skb_sender_cpu_clear(skb);
561 NF_HOOK(pf, NF_INET_LOCAL_OUT, NULL, skb, 598 NF_HOOK(pf, NF_INET_LOCAL_OUT, NULL, skb,
562 NULL, skb_dst(skb)->dev, dst_output_sk); 599 NULL, skb_dst(skb)->dev, dst_output_sk);
563 } else 600 } else
@@ -845,6 +882,8 @@ ip_vs_prepare_tunneled_skb(struct sk_buff *skb, int skb_af,
845 struct ipv6hdr *old_ipv6h = NULL; 882 struct ipv6hdr *old_ipv6h = NULL;
846#endif 883#endif
847 884
885 ip_vs_drop_early_demux_sk(skb);
886
848 if (skb_headroom(skb) < max_headroom || skb_cloned(skb)) { 887 if (skb_headroom(skb) < max_headroom || skb_cloned(skb)) {
849 new_skb = skb_realloc_headroom(skb, max_headroom); 888 new_skb = skb_realloc_headroom(skb, max_headroom);
850 if (!new_skb) 889 if (!new_skb)
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 13fad8668f83..3c20d02aee73 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -287,6 +287,46 @@ static void nf_ct_del_from_dying_or_unconfirmed_list(struct nf_conn *ct)
287 spin_unlock(&pcpu->lock); 287 spin_unlock(&pcpu->lock);
288} 288}
289 289
290/* Released via destroy_conntrack() */
291struct nf_conn *nf_ct_tmpl_alloc(struct net *net, u16 zone, gfp_t flags)
292{
293 struct nf_conn *tmpl;
294
295 tmpl = kzalloc(sizeof(*tmpl), flags);
296 if (tmpl == NULL)
297 return NULL;
298
299 tmpl->status = IPS_TEMPLATE;
300 write_pnet(&tmpl->ct_net, net);
301
302#ifdef CONFIG_NF_CONNTRACK_ZONES
303 if (zone) {
304 struct nf_conntrack_zone *nf_ct_zone;
305
306 nf_ct_zone = nf_ct_ext_add(tmpl, NF_CT_EXT_ZONE, flags);
307 if (!nf_ct_zone)
308 goto out_free;
309 nf_ct_zone->id = zone;
310 }
311#endif
312 atomic_set(&tmpl->ct_general.use, 0);
313
314 return tmpl;
315#ifdef CONFIG_NF_CONNTRACK_ZONES
316out_free:
317 kfree(tmpl);
318 return NULL;
319#endif
320}
321EXPORT_SYMBOL_GPL(nf_ct_tmpl_alloc);
322
323static void nf_ct_tmpl_free(struct nf_conn *tmpl)
324{
325 nf_ct_ext_destroy(tmpl);
326 nf_ct_ext_free(tmpl);
327 kfree(tmpl);
328}
329
290static void 330static void
291destroy_conntrack(struct nf_conntrack *nfct) 331destroy_conntrack(struct nf_conntrack *nfct)
292{ 332{
@@ -298,6 +338,10 @@ destroy_conntrack(struct nf_conntrack *nfct)
298 NF_CT_ASSERT(atomic_read(&nfct->use) == 0); 338 NF_CT_ASSERT(atomic_read(&nfct->use) == 0);
299 NF_CT_ASSERT(!timer_pending(&ct->timeout)); 339 NF_CT_ASSERT(!timer_pending(&ct->timeout));
300 340
341 if (unlikely(nf_ct_is_template(ct))) {
342 nf_ct_tmpl_free(ct);
343 return;
344 }
301 rcu_read_lock(); 345 rcu_read_lock();
302 l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct)); 346 l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
303 if (l4proto && l4proto->destroy) 347 if (l4proto && l4proto->destroy)
@@ -540,28 +584,6 @@ out:
540} 584}
541EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert); 585EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert);
542 586
543/* deletion from this larval template list happens via nf_ct_put() */
544void nf_conntrack_tmpl_insert(struct net *net, struct nf_conn *tmpl)
545{
546 struct ct_pcpu *pcpu;
547
548 __set_bit(IPS_TEMPLATE_BIT, &tmpl->status);
549 __set_bit(IPS_CONFIRMED_BIT, &tmpl->status);
550 nf_conntrack_get(&tmpl->ct_general);
551
552 /* add this conntrack to the (per cpu) tmpl list */
553 local_bh_disable();
554 tmpl->cpu = smp_processor_id();
555 pcpu = per_cpu_ptr(nf_ct_net(tmpl)->ct.pcpu_lists, tmpl->cpu);
556
557 spin_lock(&pcpu->lock);
558 /* Overload tuple linked list to put us in template list. */
559 hlist_nulls_add_head_rcu(&tmpl->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
560 &pcpu->tmpl);
561 spin_unlock_bh(&pcpu->lock);
562}
563EXPORT_SYMBOL_GPL(nf_conntrack_tmpl_insert);
564
565/* Confirm a connection given skb; places it in hash table */ 587/* Confirm a connection given skb; places it in hash table */
566int 588int
567__nf_conntrack_confirm(struct sk_buff *skb) 589__nf_conntrack_confirm(struct sk_buff *skb)
@@ -1522,10 +1544,8 @@ void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
1522 sz = nr_slots * sizeof(struct hlist_nulls_head); 1544 sz = nr_slots * sizeof(struct hlist_nulls_head);
1523 hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, 1545 hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
1524 get_order(sz)); 1546 get_order(sz));
1525 if (!hash) { 1547 if (!hash)
1526 printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
1527 hash = vzalloc(sz); 1548 hash = vzalloc(sz);
1528 }
1529 1549
1530 if (hash && nulls) 1550 if (hash && nulls)
1531 for (i = 0; i < nr_slots; i++) 1551 for (i = 0; i < nr_slots; i++)
@@ -1751,7 +1771,6 @@ int nf_conntrack_init_net(struct net *net)
1751 spin_lock_init(&pcpu->lock); 1771 spin_lock_init(&pcpu->lock);
1752 INIT_HLIST_NULLS_HEAD(&pcpu->unconfirmed, UNCONFIRMED_NULLS_VAL); 1772 INIT_HLIST_NULLS_HEAD(&pcpu->unconfirmed, UNCONFIRMED_NULLS_VAL);
1753 INIT_HLIST_NULLS_HEAD(&pcpu->dying, DYING_NULLS_VAL); 1773 INIT_HLIST_NULLS_HEAD(&pcpu->dying, DYING_NULLS_VAL);
1754 INIT_HLIST_NULLS_HEAD(&pcpu->tmpl, TEMPLATE_NULLS_VAL);
1755 } 1774 }
1756 1775
1757 net->ct.stat = alloc_percpu(struct ip_conntrack_stat); 1776 net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
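
The nf_conntrack_core.c changes replace the per-CPU template list with a dedicated allocator: nf_ct_tmpl_alloc() returns an object marked IPS_TEMPLATE with a zero refcount, callers (see the xt_CT and synproxy hunks further down) set IPS_CONFIRMED and take the first reference themselves, and destroy_conntrack() routes templates to nf_ct_tmpl_free(). A stripped-down, runnable model of that lifecycle; struct conn, the bit values and the function names are simplifications, not the kernel structures.

#include <stdio.h>
#include <stdlib.h>

#define IPS_TEMPLATE  (1u << 0)
#define IPS_CONFIRMED (1u << 1)

struct conn {
	unsigned int status;
	int use;			/* reference count */
};

/* Analogue of nf_ct_tmpl_alloc(): the template starts with the template bit
 * set and a zero refcount; the caller takes the first reference once its own
 * setup has succeeded. */
static struct conn *tmpl_alloc(void)
{
	struct conn *tmpl = calloc(1, sizeof(*tmpl));

	if (!tmpl)
		return NULL;
	tmpl->status = IPS_TEMPLATE;
	return tmpl;
}

static void tmpl_free(struct conn *tmpl)
{
	free(tmpl);
}

/* Analogue of destroy_conntrack(): templates never went through the normal
 * insertion path, so they get their own teardown branch. */
static void conn_put(struct conn *ct)
{
	if (--ct->use > 0)
		return;
	if (ct->status & IPS_TEMPLATE) {
		tmpl_free(ct);
		return;
	}
	/* real connections would be unhashed and torn down here */
	free(ct);
}

int main(void)
{
	struct conn *tmpl = tmpl_alloc();

	if (!tmpl)
		return 1;
	/* what xt_CT / synproxy now do after successful setup */
	tmpl->status |= IPS_CONFIRMED;
	tmpl->use++;

	printf("status=%#x use=%d\n", tmpl->status, tmpl->use);
	conn_put(tmpl);		/* drops the last reference, frees it */
	return 0;
}
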
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index 7a17070c5dab..b45a4223cb05 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -219,7 +219,8 @@ static inline int expect_clash(const struct nf_conntrack_expect *a,
219 a->mask.src.u3.all[count] & b->mask.src.u3.all[count]; 219 a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
220 } 220 }
221 221
222 return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask); 222 return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask) &&
223 nf_ct_zone(a->master) == nf_ct_zone(b->master);
223} 224}
224 225
225static inline int expect_matches(const struct nf_conntrack_expect *a, 226static inline int expect_matches(const struct nf_conntrack_expect *a,
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index d1c23940a86a..6b8b0abbfab4 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -2995,11 +2995,6 @@ ctnetlink_create_expect(struct net *net, u16 zone,
2995 } 2995 }
2996 2996
2997 err = nf_ct_expect_related_report(exp, portid, report); 2997 err = nf_ct_expect_related_report(exp, portid, report);
2998 if (err < 0)
2999 goto err_exp;
3000
3001 return 0;
3002err_exp:
3003 nf_ct_expect_put(exp); 2998 nf_ct_expect_put(exp);
3004err_ct: 2999err_ct:
3005 nf_ct_put(ct); 3000 nf_ct_put(ct);
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index cd60d397fe05..8a8b2abc35ff 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -213,7 +213,7 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
213 213
214 if (verdict == NF_ACCEPT) { 214 if (verdict == NF_ACCEPT) {
215 next_hook: 215 next_hook:
216 verdict = nf_iterate(&nf_hooks[entry->state.pf][entry->state.hook], 216 verdict = nf_iterate(entry->state.hook_list,
217 skb, &entry->state, &elem); 217 skb, &entry->state, &elem);
218 } 218 }
219 219
diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c
index 789feeae6c44..d7f168527903 100644
--- a/net/netfilter/nf_synproxy_core.c
+++ b/net/netfilter/nf_synproxy_core.c
@@ -349,23 +349,20 @@ static void __net_exit synproxy_proc_exit(struct net *net)
349static int __net_init synproxy_net_init(struct net *net) 349static int __net_init synproxy_net_init(struct net *net)
350{ 350{
351 struct synproxy_net *snet = synproxy_pernet(net); 351 struct synproxy_net *snet = synproxy_pernet(net);
352 struct nf_conntrack_tuple t;
353 struct nf_conn *ct; 352 struct nf_conn *ct;
354 int err = -ENOMEM; 353 int err = -ENOMEM;
355 354
356 memset(&t, 0, sizeof(t)); 355 ct = nf_ct_tmpl_alloc(net, 0, GFP_KERNEL);
357 ct = nf_conntrack_alloc(net, 0, &t, &t, GFP_KERNEL); 356 if (!ct)
358 if (IS_ERR(ct)) {
359 err = PTR_ERR(ct);
360 goto err1; 357 goto err1;
361 }
362 358
363 if (!nfct_seqadj_ext_add(ct)) 359 if (!nfct_seqadj_ext_add(ct))
364 goto err2; 360 goto err2;
365 if (!nfct_synproxy_ext_add(ct)) 361 if (!nfct_synproxy_ext_add(ct))
366 goto err2; 362 goto err2;
367 363
368 nf_conntrack_tmpl_insert(net, ct); 364 __set_bit(IPS_CONFIRMED_BIT, &ct->status);
365 nf_conntrack_get(&ct->ct_general);
369 snet->tmpl = ct; 366 snet->tmpl = ct;
370 367
371 snet->stats = alloc_percpu(struct synproxy_stats); 368 snet->stats = alloc_percpu(struct synproxy_stats);
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 8b117c90ecd7..0c0e8ecf02ab 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -269,6 +269,12 @@ static void nfnl_err_deliver(struct list_head *err_list, struct sk_buff *skb)
269 } 269 }
270} 270}
271 271
272enum {
273 NFNL_BATCH_FAILURE = (1 << 0),
274 NFNL_BATCH_DONE = (1 << 1),
275 NFNL_BATCH_REPLAY = (1 << 2),
276};
277
272static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh, 278static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
273 u_int16_t subsys_id) 279 u_int16_t subsys_id)
274{ 280{
@@ -276,13 +282,15 @@ static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
276 struct net *net = sock_net(skb->sk); 282 struct net *net = sock_net(skb->sk);
277 const struct nfnetlink_subsystem *ss; 283 const struct nfnetlink_subsystem *ss;
278 const struct nfnl_callback *nc; 284 const struct nfnl_callback *nc;
279 bool success = true, done = false;
280 static LIST_HEAD(err_list); 285 static LIST_HEAD(err_list);
286 u32 status;
281 int err; 287 int err;
282 288
283 if (subsys_id >= NFNL_SUBSYS_COUNT) 289 if (subsys_id >= NFNL_SUBSYS_COUNT)
284 return netlink_ack(skb, nlh, -EINVAL); 290 return netlink_ack(skb, nlh, -EINVAL);
285replay: 291replay:
292 status = 0;
293
286 skb = netlink_skb_clone(oskb, GFP_KERNEL); 294 skb = netlink_skb_clone(oskb, GFP_KERNEL);
287 if (!skb) 295 if (!skb)
288 return netlink_ack(oskb, nlh, -ENOMEM); 296 return netlink_ack(oskb, nlh, -ENOMEM);
@@ -336,10 +344,10 @@ replay:
336 if (type == NFNL_MSG_BATCH_BEGIN) { 344 if (type == NFNL_MSG_BATCH_BEGIN) {
337 /* Malformed: Batch begin twice */ 345 /* Malformed: Batch begin twice */
338 nfnl_err_reset(&err_list); 346 nfnl_err_reset(&err_list);
339 success = false; 347 status |= NFNL_BATCH_FAILURE;
340 goto done; 348 goto done;
341 } else if (type == NFNL_MSG_BATCH_END) { 349 } else if (type == NFNL_MSG_BATCH_END) {
342 done = true; 350 status |= NFNL_BATCH_DONE;
343 goto done; 351 goto done;
344 } else if (type < NLMSG_MIN_TYPE) { 352 } else if (type < NLMSG_MIN_TYPE) {
345 err = -EINVAL; 353 err = -EINVAL;
@@ -382,11 +390,8 @@ replay:
382 * original skb. 390 * original skb.
383 */ 391 */
384 if (err == -EAGAIN) { 392 if (err == -EAGAIN) {
385 nfnl_err_reset(&err_list); 393 status |= NFNL_BATCH_REPLAY;
386 ss->abort(oskb); 394 goto next;
387 nfnl_unlock(subsys_id);
388 kfree_skb(skb);
389 goto replay;
390 } 395 }
391 } 396 }
392ack: 397ack:
@@ -402,7 +407,7 @@ ack:
402 */ 407 */
403 nfnl_err_reset(&err_list); 408 nfnl_err_reset(&err_list);
404 netlink_ack(skb, nlmsg_hdr(oskb), -ENOMEM); 409 netlink_ack(skb, nlmsg_hdr(oskb), -ENOMEM);
405 success = false; 410 status |= NFNL_BATCH_FAILURE;
406 goto done; 411 goto done;
407 } 412 }
408 /* We don't stop processing the batch on errors, thus, 413 /* We don't stop processing the batch on errors, thus,
@@ -410,19 +415,26 @@ ack:
410 * triggers. 415 * triggers.
411 */ 416 */
412 if (err) 417 if (err)
413 success = false; 418 status |= NFNL_BATCH_FAILURE;
414 } 419 }
415 420next:
416 msglen = NLMSG_ALIGN(nlh->nlmsg_len); 421 msglen = NLMSG_ALIGN(nlh->nlmsg_len);
417 if (msglen > skb->len) 422 if (msglen > skb->len)
418 msglen = skb->len; 423 msglen = skb->len;
419 skb_pull(skb, msglen); 424 skb_pull(skb, msglen);
420 } 425 }
421done: 426done:
422 if (success && done) 427 if (status & NFNL_BATCH_REPLAY) {
428 ss->abort(oskb);
429 nfnl_err_reset(&err_list);
430 nfnl_unlock(subsys_id);
431 kfree_skb(skb);
432 goto replay;
433 } else if (status == NFNL_BATCH_DONE) {
423 ss->commit(oskb); 434 ss->commit(oskb);
424 else 435 } else {
425 ss->abort(oskb); 436 ss->abort(oskb);
437 }
426 438
427 nfnl_err_deliver(&err_list, oskb); 439 nfnl_err_deliver(&err_list, oskb);
428 nfnl_unlock(subsys_id); 440 nfnl_unlock(subsys_id);
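
nfnetlink.c replaces the success/done booleans with a status bitmask so that a replay request is carried to the end of the batch instead of restarting from the middle of the loop. The decision made at the done label reduces to the small, runnable function below (flag names shortened for the sketch):

#include <stdio.h>

enum {
	BATCH_FAILURE = (1 << 0),
	BATCH_DONE    = (1 << 1),
	BATCH_REPLAY  = (1 << 2),
};

/* Decide what to do with a finished batch, mirroring the precedence the
 * nfnetlink hunk encodes: a replay request wins, then a clean DONE with no
 * failures commits, anything else aborts. */
static const char *batch_outcome(unsigned int status)
{
	if (status & BATCH_REPLAY)
		return "replay";
	if (status == BATCH_DONE)
		return "commit";
	return "abort";
}

int main(void)
{
	printf("%s\n", batch_outcome(BATCH_DONE));                 /* commit */
	printf("%s\n", batch_outcome(BATCH_DONE | BATCH_FAILURE)); /* abort  */
	printf("%s\n", batch_outcome(BATCH_REPLAY | BATCH_DONE));  /* replay */
	return 0;
}
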
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index 75747aecdebe..43ddeee404e9 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -184,7 +184,6 @@ out:
184static int xt_ct_tg_check(const struct xt_tgchk_param *par, 184static int xt_ct_tg_check(const struct xt_tgchk_param *par,
185 struct xt_ct_target_info_v1 *info) 185 struct xt_ct_target_info_v1 *info)
186{ 186{
187 struct nf_conntrack_tuple t;
188 struct nf_conn *ct; 187 struct nf_conn *ct;
189 int ret = -EOPNOTSUPP; 188 int ret = -EOPNOTSUPP;
190 189
@@ -202,11 +201,11 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par,
202 if (ret < 0) 201 if (ret < 0)
203 goto err1; 202 goto err1;
204 203
205 memset(&t, 0, sizeof(t)); 204 ct = nf_ct_tmpl_alloc(par->net, info->zone, GFP_KERNEL);
206 ct = nf_conntrack_alloc(par->net, info->zone, &t, &t, GFP_KERNEL); 205 if (!ct) {
207 ret = PTR_ERR(ct); 206 ret = -ENOMEM;
208 if (IS_ERR(ct))
209 goto err2; 207 goto err2;
208 }
210 209
211 ret = 0; 210 ret = 0;
212 if ((info->ct_events || info->exp_events) && 211 if ((info->ct_events || info->exp_events) &&
@@ -227,8 +226,8 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par,
227 if (ret < 0) 226 if (ret < 0)
228 goto err3; 227 goto err3;
229 } 228 }
230 229 __set_bit(IPS_CONFIRMED_BIT, &ct->status);
231 nf_conntrack_tmpl_insert(par->net, ct); 230 nf_conntrack_get(&ct->ct_general);
232out: 231out:
233 info->ct = ct; 232 info->ct = ct;
234 return 0; 233 return 0;
diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
index f407ebc13481..29d2c31f406c 100644
--- a/net/netfilter/xt_IDLETIMER.c
+++ b/net/netfilter/xt_IDLETIMER.c
@@ -126,6 +126,7 @@ static int idletimer_tg_create(struct idletimer_tg_info *info)
126 goto out; 126 goto out;
127 } 127 }
128 128
129 sysfs_attr_init(&info->timer->attr.attr);
129 info->timer->attr.attr.name = kstrdup(info->label, GFP_KERNEL); 130 info->timer->attr.attr.name = kstrdup(info->label, GFP_KERNEL);
130 if (!info->timer->attr.attr.name) { 131 if (!info->timer->attr.attr.name) {
131 ret = -ENOMEM; 132 ret = -ENOMEM;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index dea925388a5b..67d210477863 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -158,7 +158,7 @@ static int __netlink_remove_tap(struct netlink_tap *nt)
158out: 158out:
159 spin_unlock(&netlink_tap_lock); 159 spin_unlock(&netlink_tap_lock);
160 160
161 if (found && nt->module) 161 if (found)
162 module_put(nt->module); 162 module_put(nt->module);
163 163
164 return found ? 0 : -ENODEV; 164 return found ? 0 : -ENODEV;
@@ -357,25 +357,52 @@ err1:
357 return NULL; 357 return NULL;
358} 358}
359 359
360
361static void
362__netlink_set_ring(struct sock *sk, struct nl_mmap_req *req, bool tx_ring, void **pg_vec,
363 unsigned int order)
364{
365 struct netlink_sock *nlk = nlk_sk(sk);
366 struct sk_buff_head *queue;
367 struct netlink_ring *ring;
368
369 queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
370 ring = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;
371
372 spin_lock_bh(&queue->lock);
373
374 ring->frame_max = req->nm_frame_nr - 1;
375 ring->head = 0;
376 ring->frame_size = req->nm_frame_size;
377 ring->pg_vec_pages = req->nm_block_size / PAGE_SIZE;
378
379 swap(ring->pg_vec_len, req->nm_block_nr);
380 swap(ring->pg_vec_order, order);
381 swap(ring->pg_vec, pg_vec);
382
383 __skb_queue_purge(queue);
384 spin_unlock_bh(&queue->lock);
385
386 WARN_ON(atomic_read(&nlk->mapped));
387
388 if (pg_vec)
389 free_pg_vec(pg_vec, order, req->nm_block_nr);
390}
391
360static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req, 392static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
361 bool closing, bool tx_ring) 393 bool tx_ring)
362{ 394{
363 struct netlink_sock *nlk = nlk_sk(sk); 395 struct netlink_sock *nlk = nlk_sk(sk);
364 struct netlink_ring *ring; 396 struct netlink_ring *ring;
365 struct sk_buff_head *queue;
366 void **pg_vec = NULL; 397 void **pg_vec = NULL;
367 unsigned int order = 0; 398 unsigned int order = 0;
368 int err;
369 399
370 ring = tx_ring ? &nlk->tx_ring : &nlk->rx_ring; 400 ring = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;
371 queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
372 401
373 if (!closing) { 402 if (atomic_read(&nlk->mapped))
374 if (atomic_read(&nlk->mapped)) 403 return -EBUSY;
375 return -EBUSY; 404 if (atomic_read(&ring->pending))
376 if (atomic_read(&ring->pending)) 405 return -EBUSY;
377 return -EBUSY;
378 }
379 406
380 if (req->nm_block_nr) { 407 if (req->nm_block_nr) {
381 if (ring->pg_vec != NULL) 408 if (ring->pg_vec != NULL)
@@ -407,31 +434,19 @@ static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
407 return -EINVAL; 434 return -EINVAL;
408 } 435 }
409 436
410 err = -EBUSY;
411 mutex_lock(&nlk->pg_vec_lock); 437 mutex_lock(&nlk->pg_vec_lock);
412 if (closing || atomic_read(&nlk->mapped) == 0) { 438 if (atomic_read(&nlk->mapped) == 0) {
413 err = 0; 439 __netlink_set_ring(sk, req, tx_ring, pg_vec, order);
414 spin_lock_bh(&queue->lock); 440 mutex_unlock(&nlk->pg_vec_lock);
415 441 return 0;
416 ring->frame_max = req->nm_frame_nr - 1;
417 ring->head = 0;
418 ring->frame_size = req->nm_frame_size;
419 ring->pg_vec_pages = req->nm_block_size / PAGE_SIZE;
420
421 swap(ring->pg_vec_len, req->nm_block_nr);
422 swap(ring->pg_vec_order, order);
423 swap(ring->pg_vec, pg_vec);
424
425 __skb_queue_purge(queue);
426 spin_unlock_bh(&queue->lock);
427
428 WARN_ON(atomic_read(&nlk->mapped));
429 } 442 }
443
430 mutex_unlock(&nlk->pg_vec_lock); 444 mutex_unlock(&nlk->pg_vec_lock);
431 445
432 if (pg_vec) 446 if (pg_vec)
433 free_pg_vec(pg_vec, order, req->nm_block_nr); 447 free_pg_vec(pg_vec, order, req->nm_block_nr);
434 return err; 448
449 return -EBUSY;
435} 450}
436 451
437static void netlink_mm_open(struct vm_area_struct *vma) 452static void netlink_mm_open(struct vm_area_struct *vma)
@@ -900,10 +915,10 @@ static void netlink_sock_destruct(struct sock *sk)
900 915
901 memset(&req, 0, sizeof(req)); 916 memset(&req, 0, sizeof(req));
902 if (nlk->rx_ring.pg_vec) 917 if (nlk->rx_ring.pg_vec)
903 netlink_set_ring(sk, &req, true, false); 918 __netlink_set_ring(sk, &req, false, NULL, 0);
904 memset(&req, 0, sizeof(req)); 919 memset(&req, 0, sizeof(req));
905 if (nlk->tx_ring.pg_vec) 920 if (nlk->tx_ring.pg_vec)
906 netlink_set_ring(sk, &req, true, true); 921 __netlink_set_ring(sk, &req, true, NULL, 0);
907 } 922 }
908#endif /* CONFIG_NETLINK_MMAP */ 923#endif /* CONFIG_NETLINK_MMAP */
909 924
@@ -1081,6 +1096,11 @@ static int netlink_insert(struct sock *sk, u32 portid)
1081 1096
1082 err = __netlink_insert(table, sk); 1097 err = __netlink_insert(table, sk);
1083 if (err) { 1098 if (err) {
1099 /* In case the hashtable backend returns with -EBUSY
1100 * from here, it must not escape to the caller.
1101 */
1102 if (unlikely(err == -EBUSY))
1103 err = -EOVERFLOW;
1084 if (err == -EEXIST) 1104 if (err == -EEXIST)
1085 err = -EADDRINUSE; 1105 err = -EADDRINUSE;
1086 nlk_sk(sk)->portid = 0; 1106 nlk_sk(sk)->portid = 0;
@@ -2223,7 +2243,7 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
2223 return -EINVAL; 2243 return -EINVAL;
2224 if (copy_from_user(&req, optval, sizeof(req))) 2244 if (copy_from_user(&req, optval, sizeof(req)))
2225 return -EFAULT; 2245 return -EFAULT;
2226 err = netlink_set_ring(sk, &req, false, 2246 err = netlink_set_ring(sk, &req,
2227 optname == NETLINK_TX_RING); 2247 optname == NETLINK_TX_RING);
2228 break; 2248 break;
2229 } 2249 }
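
The af_netlink.c rework factors the locked part of ring setup into __netlink_set_ring(), which swaps the new page vector in under the queue lock and leaves the old one to be freed after the lock is dropped; socket teardown now calls the helper directly with a NULL vector. A rough userspace sketch of that swap-under-lock, free-outside pattern, with a pthread mutex standing in for the queue lock; struct ring and ring_set() are invented for illustration.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct ring {
	pthread_mutex_t lock;		/* stands in for the queue spinlock */
	void **pg_vec;
	unsigned int pg_vec_len;
};

/* Swap the new page vector in under the lock and hand the old one back out
 * through the same variables, so it is freed only after the lock is dropped.
 * Calling this with pg_vec == NULL simply tears the ring down, which is how
 * socket destruction uses the helper in the hunk above. */
static void ring_set(struct ring *r, void **pg_vec, unsigned int len)
{
	pthread_mutex_lock(&r->lock);
	void **old_vec = r->pg_vec;
	unsigned int old_len = r->pg_vec_len;

	r->pg_vec = pg_vec;
	r->pg_vec_len = len;
	pg_vec = old_vec;
	len = old_len;
	pthread_mutex_unlock(&r->lock);

	if (pg_vec) {
		for (unsigned int i = 0; i < len; i++)
			free(pg_vec[i]);
		free(pg_vec);
	}
}

int main(void)
{
	struct ring r = { .pg_vec = NULL, .pg_vec_len = 0 };
	void **vec = calloc(2, sizeof(*vec));

	pthread_mutex_init(&r.lock, NULL);
	if (!vec)
		return 1;
	vec[0] = malloc(16);
	vec[1] = malloc(16);
	ring_set(&r, vec, 2);	/* install the ring */
	ring_set(&r, NULL, 0);	/* tear it down; the old vector is freed */
	printf("pg_vec_len=%u\n", r.pg_vec_len);
	return 0;
}
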
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 8a8c0b8b4f63..ee34f474ad14 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -273,28 +273,36 @@ static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
273 return 0; 273 return 0;
274} 274}
275 275
276static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh, 276static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
277 __be32 *addr, __be32 new_addr) 277 __be32 addr, __be32 new_addr)
278{ 278{
279 int transport_len = skb->len - skb_transport_offset(skb); 279 int transport_len = skb->len - skb_transport_offset(skb);
280 280
281 if (nh->frag_off & htons(IP_OFFSET))
282 return;
283
281 if (nh->protocol == IPPROTO_TCP) { 284 if (nh->protocol == IPPROTO_TCP) {
282 if (likely(transport_len >= sizeof(struct tcphdr))) 285 if (likely(transport_len >= sizeof(struct tcphdr)))
283 inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb, 286 inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
284 *addr, new_addr, 1); 287 addr, new_addr, 1);
285 } else if (nh->protocol == IPPROTO_UDP) { 288 } else if (nh->protocol == IPPROTO_UDP) {
286 if (likely(transport_len >= sizeof(struct udphdr))) { 289 if (likely(transport_len >= sizeof(struct udphdr))) {
287 struct udphdr *uh = udp_hdr(skb); 290 struct udphdr *uh = udp_hdr(skb);
288 291
289 if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) { 292 if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
290 inet_proto_csum_replace4(&uh->check, skb, 293 inet_proto_csum_replace4(&uh->check, skb,
291 *addr, new_addr, 1); 294 addr, new_addr, 1);
292 if (!uh->check) 295 if (!uh->check)
293 uh->check = CSUM_MANGLED_0; 296 uh->check = CSUM_MANGLED_0;
294 } 297 }
295 } 298 }
296 } 299 }
300}
297 301
302static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
303 __be32 *addr, __be32 new_addr)
304{
305 update_ip_l4_checksum(skb, nh, *addr, new_addr);
298 csum_replace4(&nh->check, *addr, new_addr); 306 csum_replace4(&nh->check, *addr, new_addr);
299 skb_clear_hash(skb); 307 skb_clear_hash(skb);
300 *addr = new_addr; 308 *addr = new_addr;
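
The openvswitch/actions.c hunk pulls the L4 checksum update into update_ip_l4_checksum() and bails out early for non-first IP fragments, since only the first fragment carries the TCP/UDP header and rewriting a "checksum" in a later fragment would corrupt payload bytes. The same offset test, stand-alone; struct ip_min is a cut-down stand-in for struct iphdr.

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

#define IP_OFFSET 0x1FFF	/* fragment offset mask, as in the kernel */

struct ip_min {
	uint16_t frag_off;	/* network byte order, flags + offset */
	uint8_t  protocol;
};

/* Only the first fragment (offset 0) has an L4 header whose checksum can be
 * updated; later fragments are left alone. */
static int may_update_l4_csum(const struct ip_min *nh)
{
	return !(nh->frag_off & htons(IP_OFFSET));
}

int main(void)
{
	struct ip_min first = { .frag_off = htons(0x2000), .protocol = 6 }; /* MF set, offset 0 */
	struct ip_min later = { .frag_off = htons(0x2010), .protocol = 6 }; /* nonzero offset   */

	printf("first fragment: %s\n", may_update_l4_csum(&first) ? "update" : "skip");
	printf("later fragment: %s\n", may_update_l4_csum(&later) ? "update" : "skip");
	return 0;
}
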
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index 4613df8c8290..65523948fb95 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -752,7 +752,7 @@ int ovs_flow_init(void)
752 BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long)); 752 BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));
753 753
754 flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow) 754 flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
755 + (num_possible_nodes() 755 + (nr_node_ids
756 * sizeof(struct flow_stats *)), 756 * sizeof(struct flow_stats *)),
757 0, 0, NULL); 757 0, 0, NULL);
758 if (flow_cache == NULL) 758 if (flow_cache == NULL)
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index c9e8741226c6..ed458b315ef4 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2403,7 +2403,8 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2403 } 2403 }
2404 tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto, 2404 tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
2405 addr, hlen); 2405 addr, hlen);
2406 if (tp_len > dev->mtu + dev->hard_header_len) { 2406 if (likely(tp_len >= 0) &&
2407 tp_len > dev->mtu + dev->hard_header_len) {
2407 struct ethhdr *ehdr; 2408 struct ethhdr *ehdr;
2408 /* Earlier code assumed this would be a VLAN pkt, 2409 /* Earlier code assumed this would be a VLAN pkt,
2409 * double-check this now that we have the actual 2410 * double-check this now that we have the actual
@@ -2784,7 +2785,7 @@ static int packet_release(struct socket *sock)
2784static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 proto) 2785static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 proto)
2785{ 2786{
2786 struct packet_sock *po = pkt_sk(sk); 2787 struct packet_sock *po = pkt_sk(sk);
2787 const struct net_device *dev_curr; 2788 struct net_device *dev_curr;
2788 __be16 proto_curr; 2789 __be16 proto_curr;
2789 bool need_rehook; 2790 bool need_rehook;
2790 2791
@@ -2808,15 +2809,13 @@ static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 proto)
2808 2809
2809 po->num = proto; 2810 po->num = proto;
2810 po->prot_hook.type = proto; 2811 po->prot_hook.type = proto;
2811
2812 if (po->prot_hook.dev)
2813 dev_put(po->prot_hook.dev);
2814
2815 po->prot_hook.dev = dev; 2812 po->prot_hook.dev = dev;
2816 2813
2817 po->ifindex = dev ? dev->ifindex : 0; 2814 po->ifindex = dev ? dev->ifindex : 0;
2818 packet_cached_dev_assign(po, dev); 2815 packet_cached_dev_assign(po, dev);
2819 } 2816 }
2817 if (dev_curr)
2818 dev_put(dev_curr);
2820 2819
2821 if (proto == 0 || !need_rehook) 2820 if (proto == 0 || !need_rehook)
2822 goto out_unlock; 2821 goto out_unlock;
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index 273b8bff6ba4..657ba9f5d308 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -759,8 +759,10 @@ void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
759 } 759 }
760 760
761 ibmr = rds_ib_alloc_fmr(rds_ibdev); 761 ibmr = rds_ib_alloc_fmr(rds_ibdev);
762 if (IS_ERR(ibmr)) 762 if (IS_ERR(ibmr)) {
763 rds_ib_dev_put(rds_ibdev);
763 return ibmr; 764 return ibmr;
765 }
764 766
765 ret = rds_ib_map_fmr(rds_ibdev, ibmr, sg, nents); 767 ret = rds_ib_map_fmr(rds_ibdev, ibmr, sg, nents);
766 if (ret == 0) 768 if (ret == 0)
diff --git a/net/rds/info.c b/net/rds/info.c
index 9a6b4f66187c..140a44a5f7b7 100644
--- a/net/rds/info.c
+++ b/net/rds/info.c
@@ -176,7 +176,7 @@ int rds_info_getsockopt(struct socket *sock, int optname, char __user *optval,
176 176
177 /* check for all kinds of wrapping and the like */ 177 /* check for all kinds of wrapping and the like */
178 start = (unsigned long)optval; 178 start = (unsigned long)optval;
179 if (len < 0 || len + PAGE_SIZE - 1 < len || start + len < start) { 179 if (len < 0 || len > INT_MAX - PAGE_SIZE + 1 || start + len < start) {
180 ret = -EINVAL; 180 ret = -EINVAL;
181 goto out; 181 goto out;
182 } 182 }
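
The rds/info.c change swaps a post-addition wrap check for an explicit bound: len must stay at or below INT_MAX - PAGE_SIZE + 1 so that rounding it up to a page multiple later cannot overflow a signed int. A minimal demonstration of that bound, with PAGE_SIZE hard-coded for the sketch:

#include <limits.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Reject lengths whose ceil-to-pages result would exceed INT_MAX, checking
 * the bound before any arithmetic that could wrap. */
static int len_is_sane(int len)
{
	if (len < 0 || len > INT_MAX - PAGE_SIZE + 1)
		return 0;
	return 1;
}

int main(void)
{
	printf("%d\n", len_is_sane(8192));            /* 1 */
	printf("%d\n", len_is_sane(INT_MAX));         /* 0 */
	printf("%d\n", len_is_sane(INT_MAX - 4095));  /* 1: still rounds to <= INT_MAX */
	return 0;
}
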
diff --git a/net/rds/transport.c b/net/rds/transport.c
index 8b4a6cd2c3a7..83498e1c75b8 100644
--- a/net/rds/transport.c
+++ b/net/rds/transport.c
@@ -73,7 +73,7 @@ EXPORT_SYMBOL_GPL(rds_trans_unregister);
73 73
74void rds_trans_put(struct rds_transport *trans) 74void rds_trans_put(struct rds_transport *trans)
75{ 75{
76 if (trans && trans->t_owner) 76 if (trans)
77 module_put(trans->t_owner); 77 module_put(trans->t_owner);
78} 78}
79 79
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index af427a3dbcba..43ec92680ae8 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -45,7 +45,7 @@ void tcf_hash_destroy(struct tc_action *a)
45} 45}
46EXPORT_SYMBOL(tcf_hash_destroy); 46EXPORT_SYMBOL(tcf_hash_destroy);
47 47
48int tcf_hash_release(struct tc_action *a, int bind) 48int __tcf_hash_release(struct tc_action *a, bool bind, bool strict)
49{ 49{
50 struct tcf_common *p = a->priv; 50 struct tcf_common *p = a->priv;
51 int ret = 0; 51 int ret = 0;
@@ -53,7 +53,7 @@ int tcf_hash_release(struct tc_action *a, int bind)
53 if (p) { 53 if (p) {
54 if (bind) 54 if (bind)
55 p->tcfc_bindcnt--; 55 p->tcfc_bindcnt--;
56 else if (p->tcfc_bindcnt > 0) 56 else if (strict && p->tcfc_bindcnt > 0)
57 return -EPERM; 57 return -EPERM;
58 58
59 p->tcfc_refcnt--; 59 p->tcfc_refcnt--;
@@ -64,9 +64,10 @@ int tcf_hash_release(struct tc_action *a, int bind)
64 ret = 1; 64 ret = 1;
65 } 65 }
66 } 66 }
67
67 return ret; 68 return ret;
68} 69}
69EXPORT_SYMBOL(tcf_hash_release); 70EXPORT_SYMBOL(__tcf_hash_release);
70 71
71static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb, 72static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb,
72 struct tc_action *a) 73 struct tc_action *a)
@@ -136,7 +137,7 @@ static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a)
136 head = &hinfo->htab[tcf_hash(i, hinfo->hmask)]; 137 head = &hinfo->htab[tcf_hash(i, hinfo->hmask)];
137 hlist_for_each_entry_safe(p, n, head, tcfc_head) { 138 hlist_for_each_entry_safe(p, n, head, tcfc_head) {
138 a->priv = p; 139 a->priv = p;
139 ret = tcf_hash_release(a, 0); 140 ret = __tcf_hash_release(a, false, true);
140 if (ret == ACT_P_DELETED) { 141 if (ret == ACT_P_DELETED) {
141 module_put(a->ops->owner); 142 module_put(a->ops->owner);
142 n_i++; 143 n_i++;
@@ -408,7 +409,7 @@ int tcf_action_destroy(struct list_head *actions, int bind)
408 int ret = 0; 409 int ret = 0;
409 410
410 list_for_each_entry_safe(a, tmp, actions, list) { 411 list_for_each_entry_safe(a, tmp, actions, list) {
411 ret = tcf_hash_release(a, bind); 412 ret = __tcf_hash_release(a, bind, true);
412 if (ret == ACT_P_DELETED) 413 if (ret == ACT_P_DELETED)
413 module_put(a->ops->owner); 414 module_put(a->ops->owner);
414 else if (ret < 0) 415 else if (ret < 0)
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index 1d56903fd4c7..d0edeb7a1950 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -27,9 +27,10 @@
27struct tcf_bpf_cfg { 27struct tcf_bpf_cfg {
28 struct bpf_prog *filter; 28 struct bpf_prog *filter;
29 struct sock_filter *bpf_ops; 29 struct sock_filter *bpf_ops;
30 char *bpf_name; 30 const char *bpf_name;
31 u32 bpf_fd; 31 u32 bpf_fd;
32 u16 bpf_num_ops; 32 u16 bpf_num_ops;
33 bool is_ebpf;
33}; 34};
34 35
35static int tcf_bpf(struct sk_buff *skb, const struct tc_action *act, 36static int tcf_bpf(struct sk_buff *skb, const struct tc_action *act,
@@ -207,6 +208,7 @@ static int tcf_bpf_init_from_ops(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
207 cfg->bpf_ops = bpf_ops; 208 cfg->bpf_ops = bpf_ops;
208 cfg->bpf_num_ops = bpf_num_ops; 209 cfg->bpf_num_ops = bpf_num_ops;
209 cfg->filter = fp; 210 cfg->filter = fp;
211 cfg->is_ebpf = false;
210 212
211 return 0; 213 return 0;
212} 214}
@@ -241,18 +243,40 @@ static int tcf_bpf_init_from_efd(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
241 cfg->bpf_fd = bpf_fd; 243 cfg->bpf_fd = bpf_fd;
242 cfg->bpf_name = name; 244 cfg->bpf_name = name;
243 cfg->filter = fp; 245 cfg->filter = fp;
246 cfg->is_ebpf = true;
244 247
245 return 0; 248 return 0;
246} 249}
247 250
251static void tcf_bpf_cfg_cleanup(const struct tcf_bpf_cfg *cfg)
252{
253 if (cfg->is_ebpf)
254 bpf_prog_put(cfg->filter);
255 else
256 bpf_prog_destroy(cfg->filter);
257
258 kfree(cfg->bpf_ops);
259 kfree(cfg->bpf_name);
260}
261
262static void tcf_bpf_prog_fill_cfg(const struct tcf_bpf *prog,
263 struct tcf_bpf_cfg *cfg)
264{
265 cfg->is_ebpf = tcf_bpf_is_ebpf(prog);
266 cfg->filter = prog->filter;
267
268 cfg->bpf_ops = prog->bpf_ops;
269 cfg->bpf_name = prog->bpf_name;
270}
271
248static int tcf_bpf_init(struct net *net, struct nlattr *nla, 272static int tcf_bpf_init(struct net *net, struct nlattr *nla,
249 struct nlattr *est, struct tc_action *act, 273 struct nlattr *est, struct tc_action *act,
250 int replace, int bind) 274 int replace, int bind)
251{ 275{
252 struct nlattr *tb[TCA_ACT_BPF_MAX + 1]; 276 struct nlattr *tb[TCA_ACT_BPF_MAX + 1];
277 struct tcf_bpf_cfg cfg, old;
253 struct tc_act_bpf *parm; 278 struct tc_act_bpf *parm;
254 struct tcf_bpf *prog; 279 struct tcf_bpf *prog;
255 struct tcf_bpf_cfg cfg;
256 bool is_bpf, is_ebpf; 280 bool is_bpf, is_ebpf;
257 int ret; 281 int ret;
258 282
@@ -301,6 +325,9 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
301 prog = to_bpf(act); 325 prog = to_bpf(act);
302 spin_lock_bh(&prog->tcf_lock); 326 spin_lock_bh(&prog->tcf_lock);
303 327
328 if (ret != ACT_P_CREATED)
329 tcf_bpf_prog_fill_cfg(prog, &old);
330
304 prog->bpf_ops = cfg.bpf_ops; 331 prog->bpf_ops = cfg.bpf_ops;
305 prog->bpf_name = cfg.bpf_name; 332 prog->bpf_name = cfg.bpf_name;
306 333
@@ -316,29 +343,22 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
316 343
317 if (ret == ACT_P_CREATED) 344 if (ret == ACT_P_CREATED)
318 tcf_hash_insert(act); 345 tcf_hash_insert(act);
346 else
347 tcf_bpf_cfg_cleanup(&old);
319 348
320 return ret; 349 return ret;
321 350
322destroy_fp: 351destroy_fp:
323 if (is_ebpf) 352 tcf_bpf_cfg_cleanup(&cfg);
324 bpf_prog_put(cfg.filter);
325 else
326 bpf_prog_destroy(cfg.filter);
327
328 kfree(cfg.bpf_ops);
329 kfree(cfg.bpf_name);
330
331 return ret; 353 return ret;
332} 354}
333 355
334static void tcf_bpf_cleanup(struct tc_action *act, int bind) 356static void tcf_bpf_cleanup(struct tc_action *act, int bind)
335{ 357{
336 const struct tcf_bpf *prog = act->priv; 358 struct tcf_bpf_cfg tmp;
337 359
338 if (tcf_bpf_is_ebpf(prog)) 360 tcf_bpf_prog_fill_cfg(act->priv, &tmp);
339 bpf_prog_put(prog->filter); 361 tcf_bpf_cfg_cleanup(&tmp);
340 else
341 bpf_prog_destroy(prog->filter);
342} 362}
343 363
344static struct tc_action_ops act_bpf_ops __read_mostly = { 364static struct tc_action_ops act_bpf_ops __read_mostly = {
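
act_bpf.c now snapshots the currently installed filter into a tcf_bpf_cfg before overwriting it and releases that old configuration only once the replace has gone through; the same helper also backs tcf_bpf_cleanup(). The sketch below models that snapshot-then-release ordering with a single heap-allocated string standing in for the BPF program; struct prog_cfg and the function names are illustrative.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct prog_cfg {
	char *name;
	int   is_ebpf;
};

static void cfg_cleanup(const struct prog_cfg *cfg)
{
	/* act_bpf frees either a classic or an eBPF filter here depending on
	 * is_ebpf; this sketch only owns the name string. */
	free(cfg->name);
}

/* Mirror of tcf_bpf_prog_fill_cfg(): snapshot whatever the action currently
 * owns *before* overwriting it, so the old resources can be released after a
 * successful replace and are left intact if setup fails earlier. */
static void fill_cfg_from(const struct prog_cfg *prog, struct prog_cfg *cfg)
{
	*cfg = *prog;
}

static int replace_prog(struct prog_cfg *prog, const char *new_name, int new_ebpf)
{
	struct prog_cfg old, new_cfg;

	new_cfg.name = strdup(new_name);
	new_cfg.is_ebpf = new_ebpf;
	if (!new_cfg.name)
		return -1;		/* nothing touched, old prog intact */

	fill_cfg_from(prog, &old);	/* snapshot before overwriting */
	*prog = new_cfg;		/* install the new configuration */
	cfg_cleanup(&old);		/* only now drop the old one */
	return 0;
}

int main(void)
{
	struct prog_cfg prog = { strdup("classic-filter"), 0 };

	replace_prog(&prog, "ebpf-filter", 1);
	printf("%s (ebpf=%d)\n", prog.name, prog.is_ebpf);
	cfg_cleanup(&prog);
	return 0;
}
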
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index a42a3b257226..268545050ddb 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -98,6 +98,8 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
98 return ret; 98 return ret;
99 ret = ACT_P_CREATED; 99 ret = ACT_P_CREATED;
100 } else { 100 } else {
101 if (bind)
102 return 0;
101 if (!ovr) { 103 if (!ovr) {
102 tcf_hash_release(a, bind); 104 tcf_hash_release(a, bind);
103 return -EEXIST; 105 return -EEXIST;
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 17e6d6669c7f..ff8b466a73f6 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -68,13 +68,12 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
68 } 68 }
69 ret = ACT_P_CREATED; 69 ret = ACT_P_CREATED;
70 } else { 70 } else {
71 p = to_pedit(a);
72 tcf_hash_release(a, bind);
73 if (bind) 71 if (bind)
74 return 0; 72 return 0;
73 tcf_hash_release(a, bind);
75 if (!ovr) 74 if (!ovr)
76 return -EEXIST; 75 return -EEXIST;
77 76 p = to_pedit(a);
78 if (p->tcfp_nkeys && p->tcfp_nkeys != parm->nkeys) { 77 if (p->tcfp_nkeys && p->tcfp_nkeys != parm->nkeys) {
79 keys = kmalloc(ksize, GFP_KERNEL); 78 keys = kmalloc(ksize, GFP_KERNEL);
80 if (keys == NULL) 79 if (keys == NULL)
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index c79ecfd36e0f..e5168f8b9640 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -378,7 +378,7 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
378 goto errout; 378 goto errout;
379 379
380 if (oldprog) { 380 if (oldprog) {
381 list_replace_rcu(&prog->link, &oldprog->link); 381 list_replace_rcu(&oldprog->link, &prog->link);
382 tcf_unbind_filter(tp, &oldprog->res); 382 tcf_unbind_filter(tp, &oldprog->res);
383 call_rcu(&oldprog->rcu, __cls_bpf_delete_prog); 383 call_rcu(&oldprog->rcu, __cls_bpf_delete_prog);
384 } else { 384 } else {
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 76bc3a20ffdb..bb2a0f529c1f 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -425,6 +425,8 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
425 if (!fnew) 425 if (!fnew)
426 goto err2; 426 goto err2;
427 427
428 tcf_exts_init(&fnew->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE);
429
428 fold = (struct flow_filter *)*arg; 430 fold = (struct flow_filter *)*arg;
429 if (fold) { 431 if (fold) {
430 err = -EINVAL; 432 err = -EINVAL;
@@ -486,7 +488,6 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
486 fnew->mask = ~0U; 488 fnew->mask = ~0U;
487 fnew->tp = tp; 489 fnew->tp = tp;
488 get_random_bytes(&fnew->hashrnd, 4); 490 get_random_bytes(&fnew->hashrnd, 4);
489 tcf_exts_init(&fnew->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE);
490 } 491 }
491 492
492 fnew->perturb_timer.function = flow_perturbation; 493 fnew->perturb_timer.function = flow_perturbation;
@@ -526,7 +527,7 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
526 if (*arg == 0) 527 if (*arg == 0)
527 list_add_tail_rcu(&fnew->list, &head->filters); 528 list_add_tail_rcu(&fnew->list, &head->filters);
528 else 529 else
529 list_replace_rcu(&fnew->list, &fold->list); 530 list_replace_rcu(&fold->list, &fnew->list);
530 531
531 *arg = (unsigned long)fnew; 532 *arg = (unsigned long)fnew;
532 533
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 9d37ccd95062..2f3d03f99487 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -499,7 +499,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
499 *arg = (unsigned long) fnew; 499 *arg = (unsigned long) fnew;
500 500
501 if (fold) { 501 if (fold) {
502 list_replace_rcu(&fnew->list, &fold->list); 502 list_replace_rcu(&fold->list, &fnew->list);
503 tcf_unbind_filter(tp, &fold->res); 503 tcf_unbind_filter(tp, &fold->res);
504 call_rcu(&fold->rcu, fl_destroy_filter); 504 call_rcu(&fold->rcu, fl_destroy_filter);
505 } else { 505 } else {
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index 93d5742dc7e0..6a783afe4960 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -385,6 +385,19 @@ static void choke_reset(struct Qdisc *sch)
385{ 385{
386 struct choke_sched_data *q = qdisc_priv(sch); 386 struct choke_sched_data *q = qdisc_priv(sch);
387 387
388 while (q->head != q->tail) {
389 struct sk_buff *skb = q->tab[q->head];
390
391 q->head = (q->head + 1) & q->tab_mask;
392 if (!skb)
393 continue;
394 qdisc_qstats_backlog_dec(sch, skb);
395 --sch->q.qlen;
396 qdisc_drop(skb, sch);
397 }
398
399 memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *));
400 q->head = q->tail = 0;
388 red_restart(&q->vars); 401 red_restart(&q->vars);
389} 402}
390 403
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index d75993f89fac..a9ba030435a2 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -155,14 +155,23 @@ static unsigned int fq_codel_drop(struct Qdisc *sch)
155 skb = dequeue_head(flow); 155 skb = dequeue_head(flow);
156 len = qdisc_pkt_len(skb); 156 len = qdisc_pkt_len(skb);
157 q->backlogs[idx] -= len; 157 q->backlogs[idx] -= len;
158 kfree_skb(skb);
159 sch->q.qlen--; 158 sch->q.qlen--;
160 qdisc_qstats_drop(sch); 159 qdisc_qstats_drop(sch);
161 qdisc_qstats_backlog_dec(sch, skb); 160 qdisc_qstats_backlog_dec(sch, skb);
161 kfree_skb(skb);
162 flow->dropped++; 162 flow->dropped++;
163 return idx; 163 return idx;
164} 164}
165 165
166static unsigned int fq_codel_qdisc_drop(struct Qdisc *sch)
167{
168 unsigned int prev_backlog;
169
170 prev_backlog = sch->qstats.backlog;
171 fq_codel_drop(sch);
172 return prev_backlog - sch->qstats.backlog;
173}
174
166static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch) 175static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
167{ 176{
168 struct fq_codel_sched_data *q = qdisc_priv(sch); 177 struct fq_codel_sched_data *q = qdisc_priv(sch);
@@ -279,10 +288,26 @@ begin:
279 288
280static void fq_codel_reset(struct Qdisc *sch) 289static void fq_codel_reset(struct Qdisc *sch)
281{ 290{
282 struct sk_buff *skb; 291 struct fq_codel_sched_data *q = qdisc_priv(sch);
292 int i;
283 293
284 while ((skb = fq_codel_dequeue(sch)) != NULL) 294 INIT_LIST_HEAD(&q->new_flows);
285 kfree_skb(skb); 295 INIT_LIST_HEAD(&q->old_flows);
296 for (i = 0; i < q->flows_cnt; i++) {
297 struct fq_codel_flow *flow = q->flows + i;
298
299 while (flow->head) {
300 struct sk_buff *skb = dequeue_head(flow);
301
302 qdisc_qstats_backlog_dec(sch, skb);
303 kfree_skb(skb);
304 }
305
306 INIT_LIST_HEAD(&flow->flowchain);
307 codel_vars_init(&flow->cvars);
308 }
309 memset(q->backlogs, 0, q->flows_cnt * sizeof(u32));
310 sch->q.qlen = 0;
286} 311}
287 312
288static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = { 313static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
@@ -604,7 +629,7 @@ static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = {
604 .enqueue = fq_codel_enqueue, 629 .enqueue = fq_codel_enqueue,
605 .dequeue = fq_codel_dequeue, 630 .dequeue = fq_codel_dequeue,
606 .peek = qdisc_peek_dequeued, 631 .peek = qdisc_peek_dequeued,
607 .drop = fq_codel_drop, 632 .drop = fq_codel_qdisc_drop,
608 .init = fq_codel_init, 633 .init = fq_codel_init,
609 .reset = fq_codel_reset, 634 .reset = fq_codel_reset,
610 .destroy = fq_codel_destroy, 635 .destroy = fq_codel_destroy,
diff --git a/net/sched/sch_plug.c b/net/sched/sch_plug.c
index 89f8fcf73f18..ade9445a55ab 100644
--- a/net/sched/sch_plug.c
+++ b/net/sched/sch_plug.c
@@ -216,6 +216,7 @@ static struct Qdisc_ops plug_qdisc_ops __read_mostly = {
216 .peek = qdisc_peek_head, 216 .peek = qdisc_peek_head,
217 .init = plug_init, 217 .init = plug_init,
218 .change = plug_change, 218 .change = plug_change,
219 .reset = qdisc_reset_queue,
219 .owner = THIS_MODULE, 220 .owner = THIS_MODULE,
220}; 221};
221 222
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 7d1492663360..52f75a5473e1 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -306,10 +306,10 @@ drop:
306 len = qdisc_pkt_len(skb); 306 len = qdisc_pkt_len(skb);
307 slot->backlog -= len; 307 slot->backlog -= len;
308 sfq_dec(q, x); 308 sfq_dec(q, x);
309 kfree_skb(skb);
310 sch->q.qlen--; 309 sch->q.qlen--;
311 qdisc_qstats_drop(sch); 310 qdisc_qstats_drop(sch);
312 qdisc_qstats_backlog_dec(sch, skb); 311 qdisc_qstats_backlog_dec(sch, skb);
312 kfree_skb(skb);
313 return len; 313 return len;
314 } 314 }
315 315
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 1425ec2bbd5a..17bef01b9aa3 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -2200,12 +2200,6 @@ static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
2200 if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen)) 2200 if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen))
2201 return -EFAULT; 2201 return -EFAULT;
2202 2202
2203 if (sctp_sk(sk)->subscribe.sctp_data_io_event)
2204 pr_warn_ratelimited(DEPRECATED "%s (pid %d) "
2205 "Requested SCTP_SNDRCVINFO event.\n"
2206 "Use SCTP_RCVINFO through SCTP_RECVRCVINFO option instead.\n",
2207 current->comm, task_pid_nr(current));
2208
2209 /* At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT, 2203 /* At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT,
2210 * if there is no data to be sent or retransmit, the stack will 2204 * if there is no data to be sent or retransmit, the stack will
2211 * immediately send up this notification. 2205 * immediately send up this notification.
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
index 9825ff0f91d6..6255d141133b 100644
--- a/net/sunrpc/backchannel_rqst.c
+++ b/net/sunrpc/backchannel_rqst.c
@@ -240,8 +240,8 @@ static struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt, __be32 xid)
240 req = xprt_alloc_bc_req(xprt, GFP_ATOMIC); 240 req = xprt_alloc_bc_req(xprt, GFP_ATOMIC);
241 if (!req) 241 if (!req)
242 goto not_found; 242 goto not_found;
243 /* Note: this 'free' request adds it to xprt->bc_pa_list */ 243 list_add_tail(&req->rq_bc_pa_list, &xprt->bc_pa_list);
244 xprt_free_bc_request(req); 244 xprt->bc_alloc_count++;
245 } 245 }
246 req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst, 246 req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst,
247 rq_bc_pa_list); 247 rq_bc_pa_list);
@@ -336,7 +336,7 @@ void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
336 336
337 spin_lock(&xprt->bc_pa_lock); 337 spin_lock(&xprt->bc_pa_lock);
338 list_del(&req->rq_bc_pa_list); 338 list_del(&req->rq_bc_pa_list);
339 xprt->bc_alloc_count--; 339 xprt_dec_alloc_count(xprt, 1);
340 spin_unlock(&xprt->bc_pa_lock); 340 spin_unlock(&xprt->bc_pa_lock);
341 341
342 req->rq_private_buf.len = copied; 342 req->rq_private_buf.len = copied;
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index cbc6af923dd1..23608eb0ded2 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1902,6 +1902,7 @@ call_transmit_status(struct rpc_task *task)
1902 1902
1903 switch (task->tk_status) { 1903 switch (task->tk_status) {
1904 case -EAGAIN: 1904 case -EAGAIN:
1905 case -ENOBUFS:
1905 break; 1906 break;
1906 default: 1907 default:
1907 dprint_status(task); 1908 dprint_status(task);
@@ -1928,7 +1929,6 @@ call_transmit_status(struct rpc_task *task)
1928 case -ECONNABORTED: 1929 case -ECONNABORTED:
1929 case -EADDRINUSE: 1930 case -EADDRINUSE:
1930 case -ENOTCONN: 1931 case -ENOTCONN:
1931 case -ENOBUFS:
1932 case -EPIPE: 1932 case -EPIPE:
1933 rpc_task_force_reencode(task); 1933 rpc_task_force_reencode(task);
1934 } 1934 }
@@ -2057,12 +2057,13 @@ call_status(struct rpc_task *task)
2057 case -ECONNABORTED: 2057 case -ECONNABORTED:
2058 rpc_force_rebind(clnt); 2058 rpc_force_rebind(clnt);
2059 case -EADDRINUSE: 2059 case -EADDRINUSE:
2060 case -ENOBUFS:
2061 rpc_delay(task, 3*HZ); 2060 rpc_delay(task, 3*HZ);
2062 case -EPIPE: 2061 case -EPIPE:
2063 case -ENOTCONN: 2062 case -ENOTCONN:
2064 task->tk_action = call_bind; 2063 task->tk_action = call_bind;
2065 break; 2064 break;
2065 case -ENOBUFS:
2066 rpc_delay(task, HZ>>2);
2066 case -EAGAIN: 2067 case -EAGAIN:
2067 task->tk_action = call_transmit; 2068 task->tk_action = call_transmit;
2068 break; 2069 break;
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index e193c2b5476b..0030376327b7 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -527,6 +527,10 @@ static int xs_local_send_request(struct rpc_task *task)
527 true, &sent); 527 true, &sent);
528 dprintk("RPC: %s(%u) = %d\n", 528 dprintk("RPC: %s(%u) = %d\n",
529 __func__, xdr->len - req->rq_bytes_sent, status); 529 __func__, xdr->len - req->rq_bytes_sent, status);
530
531 if (status == -EAGAIN && sock_writeable(transport->inet))
532 status = -ENOBUFS;
533
530 if (likely(sent > 0) || status == 0) { 534 if (likely(sent > 0) || status == 0) {
531 req->rq_bytes_sent += sent; 535 req->rq_bytes_sent += sent;
532 req->rq_xmit_bytes_sent += sent; 536 req->rq_xmit_bytes_sent += sent;
@@ -539,6 +543,7 @@ static int xs_local_send_request(struct rpc_task *task)
539 543
540 switch (status) { 544 switch (status) {
541 case -ENOBUFS: 545 case -ENOBUFS:
546 break;
542 case -EAGAIN: 547 case -EAGAIN:
543 status = xs_nospace(task); 548 status = xs_nospace(task);
544 break; 549 break;
@@ -589,6 +594,9 @@ static int xs_udp_send_request(struct rpc_task *task)
589 if (status == -EPERM) 594 if (status == -EPERM)
590 goto process_status; 595 goto process_status;
591 596
597 if (status == -EAGAIN && sock_writeable(transport->inet))
598 status = -ENOBUFS;
599
592 if (sent > 0 || status == 0) { 600 if (sent > 0 || status == 0) {
593 req->rq_xmit_bytes_sent += sent; 601 req->rq_xmit_bytes_sent += sent;
594 if (sent >= req->rq_slen) 602 if (sent >= req->rq_slen)
@@ -669,9 +677,6 @@ static int xs_tcp_send_request(struct rpc_task *task)
669 dprintk("RPC: xs_tcp_send_request(%u) = %d\n", 677 dprintk("RPC: xs_tcp_send_request(%u) = %d\n",
670 xdr->len - req->rq_bytes_sent, status); 678 xdr->len - req->rq_bytes_sent, status);
671 679
672 if (unlikely(sent == 0 && status < 0))
673 break;
674
675 /* If we've sent the entire packet, immediately 680 /* If we've sent the entire packet, immediately
676 * reset the count of bytes sent. */ 681 * reset the count of bytes sent. */
677 req->rq_bytes_sent += sent; 682 req->rq_bytes_sent += sent;
@@ -681,18 +686,21 @@ static int xs_tcp_send_request(struct rpc_task *task)
681 return 0; 686 return 0;
682 } 687 }
683 688
684 if (sent != 0) 689 if (status < 0)
685 continue; 690 break;
686 status = -EAGAIN; 691 if (sent == 0) {
687 break; 692 status = -EAGAIN;
693 break;
694 }
688 } 695 }
696 if (status == -EAGAIN && sk_stream_is_writeable(transport->inet))
697 status = -ENOBUFS;
689 698
690 switch (status) { 699 switch (status) {
691 case -ENOTSOCK: 700 case -ENOTSOCK:
692 status = -ENOTCONN; 701 status = -ENOTCONN;
693 /* Should we call xs_close() here? */ 702 /* Should we call xs_close() here? */
694 break; 703 break;
695 case -ENOBUFS:
696 case -EAGAIN: 704 case -EAGAIN:
697 status = xs_nospace(task); 705 status = xs_nospace(task);
698 break; 706 break;
@@ -703,6 +711,7 @@ static int xs_tcp_send_request(struct rpc_task *task)
703 case -ECONNREFUSED: 711 case -ECONNREFUSED:
704 case -ENOTCONN: 712 case -ENOTCONN:
705 case -EADDRINUSE: 713 case -EADDRINUSE:
714 case -ENOBUFS:
706 case -EPIPE: 715 case -EPIPE:
707 clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags); 716 clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
708 } 717 }
diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
index 84f77a054025..9f2add3cba26 100644
--- a/net/switchdev/switchdev.c
+++ b/net/switchdev/switchdev.c
@@ -171,8 +171,10 @@ int switchdev_port_attr_set(struct net_device *dev, struct switchdev_attr *attr)
171 * released. 171 * released.
172 */ 172 */
173 173
174 attr->trans = SWITCHDEV_TRANS_ABORT; 174 if (err != -EOPNOTSUPP) {
175 __switchdev_port_attr_set(dev, attr); 175 attr->trans = SWITCHDEV_TRANS_ABORT;
176 __switchdev_port_attr_set(dev, attr);
177 }
176 178
177 return err; 179 return err;
178 } 180 }
@@ -249,8 +251,10 @@ int switchdev_port_obj_add(struct net_device *dev, struct switchdev_obj *obj)
249 * released. 251 * released.
250 */ 252 */
251 253
252 obj->trans = SWITCHDEV_TRANS_ABORT; 254 if (err != -EOPNOTSUPP) {
253 __switchdev_port_obj_add(dev, obj); 255 obj->trans = SWITCHDEV_TRANS_ABORT;
256 __switchdev_port_obj_add(dev, obj);
257 }
254 258
255 return err; 259 return err;
256 } 260 }
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 46b6ed534ef2..3a7567f690f3 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -2007,6 +2007,7 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags)
2007 res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, 1); 2007 res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, 1);
2008 if (res) 2008 if (res)
2009 goto exit; 2009 goto exit;
2010 security_sk_clone(sock->sk, new_sock->sk);
2010 2011
2011 new_sk = new_sock->sk; 2012 new_sk = new_sock->sk;
2012 new_tsock = tipc_sk(new_sk); 2013 new_tsock = tipc_sk(new_sk);
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index 915b328b9ac5..59cabc9bce69 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -797,23 +797,18 @@ static bool cfg80211_ir_permissive_chan(struct wiphy *wiphy,
797 return false; 797 return false;
798} 798}
799 799
800bool cfg80211_reg_can_beacon(struct wiphy *wiphy, 800static bool _cfg80211_reg_can_beacon(struct wiphy *wiphy,
801 struct cfg80211_chan_def *chandef, 801 struct cfg80211_chan_def *chandef,
802 enum nl80211_iftype iftype) 802 enum nl80211_iftype iftype,
803 bool check_no_ir)
803{ 804{
804 bool res; 805 bool res;
805 u32 prohibited_flags = IEEE80211_CHAN_DISABLED | 806 u32 prohibited_flags = IEEE80211_CHAN_DISABLED |
806 IEEE80211_CHAN_RADAR; 807 IEEE80211_CHAN_RADAR;
807 808
808 trace_cfg80211_reg_can_beacon(wiphy, chandef, iftype); 809 trace_cfg80211_reg_can_beacon(wiphy, chandef, iftype, check_no_ir);
809 810
810 /* 811 if (check_no_ir)
811 * Under certain conditions suggested by some regulatory bodies a
812 * GO/STA can IR on channels marked with IEEE80211_NO_IR. Set this flag
813 * only if such relaxations are not enabled and the conditions are not
814 * met.
815 */
816 if (!cfg80211_ir_permissive_chan(wiphy, iftype, chandef->chan))
817 prohibited_flags |= IEEE80211_CHAN_NO_IR; 812 prohibited_flags |= IEEE80211_CHAN_NO_IR;
818 813
819 if (cfg80211_chandef_dfs_required(wiphy, chandef, iftype) > 0 && 814 if (cfg80211_chandef_dfs_required(wiphy, chandef, iftype) > 0 &&
@@ -827,8 +822,36 @@ bool cfg80211_reg_can_beacon(struct wiphy *wiphy,
827 trace_cfg80211_return_bool(res); 822 trace_cfg80211_return_bool(res);
828 return res; 823 return res;
829} 824}
825
826bool cfg80211_reg_can_beacon(struct wiphy *wiphy,
827 struct cfg80211_chan_def *chandef,
828 enum nl80211_iftype iftype)
829{
830 return _cfg80211_reg_can_beacon(wiphy, chandef, iftype, true);
831}
830EXPORT_SYMBOL(cfg80211_reg_can_beacon); 832EXPORT_SYMBOL(cfg80211_reg_can_beacon);
831 833
834bool cfg80211_reg_can_beacon_relax(struct wiphy *wiphy,
835 struct cfg80211_chan_def *chandef,
836 enum nl80211_iftype iftype)
837{
838 bool check_no_ir;
839
840 ASSERT_RTNL();
841
842 /*
843 * Under certain conditions suggested by some regulatory bodies a
844 * GO/STA can IR on channels marked with IEEE80211_NO_IR. Set this flag
845 * only if such relaxations are not enabled and the conditions are not
846 * met.
847 */
848 check_no_ir = !cfg80211_ir_permissive_chan(wiphy, iftype,
849 chandef->chan);
850
851 return _cfg80211_reg_can_beacon(wiphy, chandef, iftype, check_no_ir);
852}
853EXPORT_SYMBOL(cfg80211_reg_can_beacon_relax);
854
832int cfg80211_set_monitor_channel(struct cfg80211_registered_device *rdev, 855int cfg80211_set_monitor_channel(struct cfg80211_registered_device *rdev,
833 struct cfg80211_chan_def *chandef) 856 struct cfg80211_chan_def *chandef)
834{ 857{
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index c264effd00a6..76b41578a838 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -2003,7 +2003,8 @@ static int __nl80211_set_channel(struct cfg80211_registered_device *rdev,
2003 switch (iftype) { 2003 switch (iftype) {
2004 case NL80211_IFTYPE_AP: 2004 case NL80211_IFTYPE_AP:
2005 case NL80211_IFTYPE_P2P_GO: 2005 case NL80211_IFTYPE_P2P_GO:
2006 if (!cfg80211_reg_can_beacon(&rdev->wiphy, &chandef, iftype)) { 2006 if (!cfg80211_reg_can_beacon_relax(&rdev->wiphy, &chandef,
2007 iftype)) {
2007 result = -EINVAL; 2008 result = -EINVAL;
2008 break; 2009 break;
2009 } 2010 }
@@ -3403,8 +3404,8 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
3403 } else if (!nl80211_get_ap_channel(rdev, &params)) 3404 } else if (!nl80211_get_ap_channel(rdev, &params))
3404 return -EINVAL; 3405 return -EINVAL;
3405 3406
3406 if (!cfg80211_reg_can_beacon(&rdev->wiphy, &params.chandef, 3407 if (!cfg80211_reg_can_beacon_relax(&rdev->wiphy, &params.chandef,
3407 wdev->iftype)) 3408 wdev->iftype))
3408 return -EINVAL; 3409 return -EINVAL;
3409 3410
3410 if (info->attrs[NL80211_ATTR_ACL_POLICY]) { 3411 if (info->attrs[NL80211_ATTR_ACL_POLICY]) {
@@ -6492,8 +6493,8 @@ skip_beacons:
6492 if (err) 6493 if (err)
6493 return err; 6494 return err;
6494 6495
6495 if (!cfg80211_reg_can_beacon(&rdev->wiphy, &params.chandef, 6496 if (!cfg80211_reg_can_beacon_relax(&rdev->wiphy, &params.chandef,
6496 wdev->iftype)) 6497 wdev->iftype))
6497 return -EINVAL; 6498 return -EINVAL;
6498 6499
6499 err = cfg80211_chandef_dfs_required(wdev->wiphy, 6500 err = cfg80211_chandef_dfs_required(wdev->wiphy,
@@ -10170,7 +10171,8 @@ static int nl80211_tdls_channel_switch(struct sk_buff *skb,
10170 return -EINVAL; 10171 return -EINVAL;
10171 10172
10172 /* we will be active on the TDLS link */ 10173 /* we will be active on the TDLS link */
10173 if (!cfg80211_reg_can_beacon(&rdev->wiphy, &chandef, wdev->iftype)) 10174 if (!cfg80211_reg_can_beacon_relax(&rdev->wiphy, &chandef,
10175 wdev->iftype))
10174 return -EINVAL; 10176 return -EINVAL;
10175 10177
10176 /* don't allow switching to DFS channels */ 10178 /* don't allow switching to DFS channels */
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index d359e0610198..aa2d75482017 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -544,15 +544,15 @@ static int call_crda(const char *alpha2)
544 reg_regdb_query(alpha2); 544 reg_regdb_query(alpha2);
545 545
546 if (reg_crda_timeouts > REG_MAX_CRDA_TIMEOUTS) { 546 if (reg_crda_timeouts > REG_MAX_CRDA_TIMEOUTS) {
547 pr_info("Exceeded CRDA call max attempts. Not calling CRDA\n"); 547 pr_debug("Exceeded CRDA call max attempts. Not calling CRDA\n");
548 return -EINVAL; 548 return -EINVAL;
549 } 549 }
550 550
551 if (!is_world_regdom((char *) alpha2)) 551 if (!is_world_regdom((char *) alpha2))
552 pr_info("Calling CRDA for country: %c%c\n", 552 pr_debug("Calling CRDA for country: %c%c\n",
553 alpha2[0], alpha2[1]); 553 alpha2[0], alpha2[1]);
554 else 554 else
555 pr_info("Calling CRDA to update world regulatory domain\n"); 555 pr_debug("Calling CRDA to update world regulatory domain\n");
556 556
557 return kobject_uevent_env(&reg_pdev->dev.kobj, KOBJ_CHANGE, env); 557 return kobject_uevent_env(&reg_pdev->dev.kobj, KOBJ_CHANGE, env);
558} 558}
@@ -1589,7 +1589,7 @@ static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev)
1589 case NL80211_IFTYPE_AP: 1589 case NL80211_IFTYPE_AP:
1590 case NL80211_IFTYPE_P2P_GO: 1590 case NL80211_IFTYPE_P2P_GO:
1591 case NL80211_IFTYPE_ADHOC: 1591 case NL80211_IFTYPE_ADHOC:
1592 return cfg80211_reg_can_beacon(wiphy, &chandef, iftype); 1592 return cfg80211_reg_can_beacon_relax(wiphy, &chandef, iftype);
1593 case NL80211_IFTYPE_STATION: 1593 case NL80211_IFTYPE_STATION:
1594 case NL80211_IFTYPE_P2P_CLIENT: 1594 case NL80211_IFTYPE_P2P_CLIENT:
1595 return cfg80211_chandef_usable(wiphy, &chandef, 1595 return cfg80211_chandef_usable(wiphy, &chandef,
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index af3617c9879e..a808279a432a 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -2358,20 +2358,23 @@ TRACE_EVENT(cfg80211_cqm_rssi_notify,
2358 2358
2359TRACE_EVENT(cfg80211_reg_can_beacon, 2359TRACE_EVENT(cfg80211_reg_can_beacon,
2360 TP_PROTO(struct wiphy *wiphy, struct cfg80211_chan_def *chandef, 2360 TP_PROTO(struct wiphy *wiphy, struct cfg80211_chan_def *chandef,
2361 enum nl80211_iftype iftype), 2361 enum nl80211_iftype iftype, bool check_no_ir),
2362 TP_ARGS(wiphy, chandef, iftype), 2362 TP_ARGS(wiphy, chandef, iftype, check_no_ir),
2363 TP_STRUCT__entry( 2363 TP_STRUCT__entry(
2364 WIPHY_ENTRY 2364 WIPHY_ENTRY
2365 CHAN_DEF_ENTRY 2365 CHAN_DEF_ENTRY
2366 __field(enum nl80211_iftype, iftype) 2366 __field(enum nl80211_iftype, iftype)
2367 __field(bool, check_no_ir)
2367 ), 2368 ),
2368 TP_fast_assign( 2369 TP_fast_assign(
2369 WIPHY_ASSIGN; 2370 WIPHY_ASSIGN;
2370 CHAN_DEF_ASSIGN(chandef); 2371 CHAN_DEF_ASSIGN(chandef);
2371 __entry->iftype = iftype; 2372 __entry->iftype = iftype;
2373 __entry->check_no_ir = check_no_ir;
2372 ), 2374 ),
2373 TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT ", iftype=%d", 2375 TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT ", iftype=%d check_no_ir=%s",
2374 WIPHY_PR_ARG, CHAN_DEF_PR_ARG, __entry->iftype) 2376 WIPHY_PR_ARG, CHAN_DEF_PR_ARG, __entry->iftype,
2377 BOOL_TO_STR(__entry->check_no_ir))
2375); 2378);
2376 2379
2377TRACE_EVENT(cfg80211_chandef_dfs_required, 2380TRACE_EVENT(cfg80211_chandef_dfs_required,
diff --git a/samples/trace_events/trace-events-sample.h b/samples/trace_events/trace-events-sample.h
index 8965d1bb8811..125d6402f64f 100644
--- a/samples/trace_events/trace-events-sample.h
+++ b/samples/trace_events/trace-events-sample.h
@@ -168,7 +168,10 @@
168 * 168 *
169 * For __dynamic_array(int, foo, bar) use __get_dynamic_array(foo) 169 * For __dynamic_array(int, foo, bar) use __get_dynamic_array(foo)
170 * Use __get_dynamic_array_len(foo) to get the length of the array 170 * Use __get_dynamic_array_len(foo) to get the length of the array
171 * saved. 171 * saved. Note, __get_dynamic_array_len() returns the total allocated
172 * length of the dynamic array; __print_array() expects the second
173 * parameter to be the number of elements. To get that, the array length
174 * needs to be divided by the element size.
172 * 175 *
173 * For __string(foo, bar) use __get_str(foo) 176 * For __string(foo, bar) use __get_str(foo)
174 * 177 *
@@ -288,7 +291,7 @@ TRACE_EVENT(foo_bar,
288 * This prints out the array that is defined by __array in a nice format. 291 * This prints out the array that is defined by __array in a nice format.
289 */ 292 */
290 __print_array(__get_dynamic_array(list), 293 __print_array(__get_dynamic_array(list),
291 __get_dynamic_array_len(list), 294 __get_dynamic_array_len(list) / sizeof(int),
292 sizeof(int)), 295 sizeof(int)),
293 __get_str(str), __get_bitmask(cpus)) 296 __get_str(str), __get_bitmask(cpus))
294); 297);
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 90e1edc8dd42..d5c8e9a3a73c 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -2599,7 +2599,7 @@ sub process {
2599# if LONG_LINE is ignored, the other 2 types are also ignored 2599# if LONG_LINE is ignored, the other 2 types are also ignored
2600# 2600#
2601 2601
2602 if ($length > $max_line_length) { 2602 if ($line =~ /^\+/ && $length > $max_line_length) {
2603 my $msg_type = "LONG_LINE"; 2603 my $msg_type = "LONG_LINE";
2604 2604
2605 # Check the allowed long line types first 2605 # Check the allowed long line types first
diff --git a/scripts/kconfig/streamline_config.pl b/scripts/kconfig/streamline_config.pl
index 9cb8522d8d22..f3d3fb42b873 100755
--- a/scripts/kconfig/streamline_config.pl
+++ b/scripts/kconfig/streamline_config.pl
@@ -137,7 +137,7 @@ my $ksource = ($ARGV[0] ? $ARGV[0] : '.');
137my $kconfig = $ARGV[1]; 137my $kconfig = $ARGV[1];
138my $lsmod_file = $ENV{'LSMOD'}; 138my $lsmod_file = $ENV{'LSMOD'};
139 139
140my @makefiles = `find $ksource -name Makefile 2>/dev/null`; 140my @makefiles = `find $ksource -name Makefile -or -name Kbuild 2>/dev/null`;
141chomp @makefiles; 141chomp @makefiles;
142 142
143my %depends; 143my %depends;
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index e72548b5897e..d33437007ad2 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -1181,9 +1181,11 @@ void __key_link_end(struct key *keyring,
1181 if (index_key->type == &key_type_keyring) 1181 if (index_key->type == &key_type_keyring)
1182 up_write(&keyring_serialise_link_sem); 1182 up_write(&keyring_serialise_link_sem);
1183 1183
1184 if (edit && !edit->dead_leaf) { 1184 if (edit) {
1185 key_payload_reserve(keyring, 1185 if (!edit->dead_leaf) {
1186 keyring->datalen - KEYQUOTA_LINK_BYTES); 1186 key_payload_reserve(keyring,
1187 keyring->datalen - KEYQUOTA_LINK_BYTES);
1188 }
1187 assoc_array_cancel_edit(edit); 1189 assoc_array_cancel_edit(edit);
1188 } 1190 }
1189 up_write(&keyring->sem); 1191 up_write(&keyring->sem);
diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
index 9ed32502470e..5ebb89687936 100644
--- a/security/yama/yama_lsm.c
+++ b/security/yama/yama_lsm.c
@@ -406,6 +406,7 @@ static __init int yama_init(void)
406 */ 406 */
407 if (!security_module_enable("yama")) 407 if (!security_module_enable("yama"))
408 return 0; 408 return 0;
409 yama_add_hooks();
409#endif 410#endif
410 pr_info("Yama: becoming mindful.\n"); 411 pr_info("Yama: becoming mindful.\n");
411 412
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index d126c03361ae..75888dd38a7f 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -85,7 +85,7 @@ static DECLARE_RWSEM(snd_pcm_link_rwsem);
85void snd_pcm_stream_lock(struct snd_pcm_substream *substream) 85void snd_pcm_stream_lock(struct snd_pcm_substream *substream)
86{ 86{
87 if (substream->pcm->nonatomic) { 87 if (substream->pcm->nonatomic) {
88 down_read(&snd_pcm_link_rwsem); 88 down_read_nested(&snd_pcm_link_rwsem, SINGLE_DEPTH_NESTING);
89 mutex_lock(&substream->self_group.mutex); 89 mutex_lock(&substream->self_group.mutex);
90 } else { 90 } else {
91 read_lock(&snd_pcm_link_rwlock); 91 read_lock(&snd_pcm_link_rwlock);
diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c
index 7bb988fa6b6d..2a153d260836 100644
--- a/sound/firewire/amdtp.c
+++ b/sound/firewire/amdtp.c
@@ -740,8 +740,9 @@ static int handle_in_packet(struct amdtp_stream *s,
740 s->data_block_counter != UINT_MAX) 740 s->data_block_counter != UINT_MAX)
741 data_block_counter = s->data_block_counter; 741 data_block_counter = s->data_block_counter;
742 742
743 if (((s->flags & CIP_SKIP_DBC_ZERO_CHECK) && data_block_counter == 0) || 743 if (((s->flags & CIP_SKIP_DBC_ZERO_CHECK) &&
744 (s->data_block_counter == UINT_MAX)) { 744 data_block_counter == s->tx_first_dbc) ||
745 s->data_block_counter == UINT_MAX) {
745 lost = false; 746 lost = false;
746 } else if (!(s->flags & CIP_DBC_IS_END_EVENT)) { 747 } else if (!(s->flags & CIP_DBC_IS_END_EVENT)) {
747 lost = data_block_counter != s->data_block_counter; 748 lost = data_block_counter != s->data_block_counter;
diff --git a/sound/firewire/amdtp.h b/sound/firewire/amdtp.h
index 26b909329e54..b2cf9e75693b 100644
--- a/sound/firewire/amdtp.h
+++ b/sound/firewire/amdtp.h
@@ -157,6 +157,8 @@ struct amdtp_stream {
157 157
158 /* quirk: fixed interval of dbc between previous/current packets. */ 158 /* quirk: fixed interval of dbc between previous/current packets. */
159 unsigned int tx_dbc_interval; 159 unsigned int tx_dbc_interval;
160 /* quirk: indicates the value of the dbc field in the first packet. */
161 unsigned int tx_first_dbc;
160 162
161 bool callbacked; 163 bool callbacked;
162 wait_queue_head_t callback_wait; 164 wait_queue_head_t callback_wait;
diff --git a/sound/firewire/fireworks/fireworks.c b/sound/firewire/fireworks/fireworks.c
index 2682e7e3e5c9..c94a432f7cc6 100644
--- a/sound/firewire/fireworks/fireworks.c
+++ b/sound/firewire/fireworks/fireworks.c
@@ -248,8 +248,16 @@ efw_probe(struct fw_unit *unit,
248 err = get_hardware_info(efw); 248 err = get_hardware_info(efw);
249 if (err < 0) 249 if (err < 0)
250 goto error; 250 goto error;
251 /* AudioFire8 (since 2009) and AudioFirePre8 */
251 if (entry->model_id == MODEL_ECHO_AUDIOFIRE_9) 252 if (entry->model_id == MODEL_ECHO_AUDIOFIRE_9)
252 efw->is_af9 = true; 253 efw->is_af9 = true;
254 /* These models use the same firmware. */
255 if (entry->model_id == MODEL_ECHO_AUDIOFIRE_2 ||
256 entry->model_id == MODEL_ECHO_AUDIOFIRE_4 ||
257 entry->model_id == MODEL_ECHO_AUDIOFIRE_9 ||
258 entry->model_id == MODEL_GIBSON_RIP ||
259 entry->model_id == MODEL_GIBSON_GOLDTOP)
260 efw->is_fireworks3 = true;
253 261
254 snd_efw_proc_init(efw); 262 snd_efw_proc_init(efw);
255 263
diff --git a/sound/firewire/fireworks/fireworks.h b/sound/firewire/fireworks/fireworks.h
index 4f0201a95222..084d414b228c 100644
--- a/sound/firewire/fireworks/fireworks.h
+++ b/sound/firewire/fireworks/fireworks.h
@@ -71,6 +71,7 @@ struct snd_efw {
71 71
72 /* for quirks */ 72 /* for quirks */
73 bool is_af9; 73 bool is_af9;
74 bool is_fireworks3;
74 u32 firmware_version; 75 u32 firmware_version;
75 76
76 unsigned int midi_in_ports; 77 unsigned int midi_in_ports;
diff --git a/sound/firewire/fireworks/fireworks_stream.c b/sound/firewire/fireworks/fireworks_stream.c
index c55db1bddc80..7e353f1f7bff 100644
--- a/sound/firewire/fireworks/fireworks_stream.c
+++ b/sound/firewire/fireworks/fireworks_stream.c
@@ -172,6 +172,15 @@ int snd_efw_stream_init_duplex(struct snd_efw *efw)
172 efw->tx_stream.flags |= CIP_DBC_IS_END_EVENT; 172 efw->tx_stream.flags |= CIP_DBC_IS_END_EVENT;
173 /* Fireworks reset dbc at bus reset. */ 173 /* Fireworks reset dbc at bus reset. */
174 efw->tx_stream.flags |= CIP_SKIP_DBC_ZERO_CHECK; 174 efw->tx_stream.flags |= CIP_SKIP_DBC_ZERO_CHECK;
175 /*
176 * But recent firmware starts packets with a non-zero dbc.
177 * Driver version 5.7.6 installs firmware version 5.7.3.
178 */
179 if (efw->is_fireworks3 &&
180 (efw->firmware_version == 0x5070000 ||
181 efw->firmware_version == 0x5070300 ||
182 efw->firmware_version == 0x5080000))
183 efw->tx_stream.tx_first_dbc = 0x02;
175 /* AudioFire9 always reports wrong dbs. */ 184 /* AudioFire9 always reports wrong dbs. */
176 if (efw->is_af9) 185 if (efw->is_af9)
177 efw->tx_stream.flags |= CIP_WRONG_DBS; 186 efw->tx_stream.flags |= CIP_WRONG_DBS;
diff --git a/sound/hda/ext/hdac_ext_controller.c b/sound/hda/ext/hdac_ext_controller.c
index b2da19b60f4e..358f16195483 100644
--- a/sound/hda/ext/hdac_ext_controller.c
+++ b/sound/hda/ext/hdac_ext_controller.c
@@ -44,16 +44,10 @@ int snd_hdac_ext_bus_parse_capabilities(struct hdac_ext_bus *ebus)
44 44
45 offset = snd_hdac_chip_readl(bus, LLCH); 45 offset = snd_hdac_chip_readl(bus, LLCH);
46 46
47 if (offset < 0)
48 return -EIO;
49
50 /* Let's walk the linked capabilities list */ 47 /* Let's walk the linked capabilities list */
51 do { 48 do {
52 cur_cap = _snd_hdac_chip_read(l, bus, offset); 49 cur_cap = _snd_hdac_chip_read(l, bus, offset);
53 50
54 if (cur_cap < 0)
55 return -EIO;
56
57 dev_dbg(bus->dev, "Capability version: 0x%x\n", 51 dev_dbg(bus->dev, "Capability version: 0x%x\n",
58 ((cur_cap & AZX_CAP_HDR_VER_MASK) >> AZX_CAP_HDR_VER_OFF)); 52 ((cur_cap & AZX_CAP_HDR_VER_MASK) >> AZX_CAP_HDR_VER_OFF));
59 53
diff --git a/sound/hda/ext/hdac_ext_stream.c b/sound/hda/ext/hdac_ext_stream.c
index f8ffbdbb450d..3de47dd1a76d 100644
--- a/sound/hda/ext/hdac_ext_stream.c
+++ b/sound/hda/ext/hdac_ext_stream.c
@@ -299,7 +299,7 @@ hdac_ext_host_stream_assign(struct hdac_ext_bus *ebus,
299 if (stream->direction != substream->stream) 299 if (stream->direction != substream->stream)
300 continue; 300 continue;
301 301
302 if (stream->opened) { 302 if (!stream->opened) {
303 if (!hstream->decoupled) 303 if (!hstream->decoupled)
304 snd_hdac_ext_stream_decouple(ebus, hstream, true); 304 snd_hdac_ext_stream_decouple(ebus, hstream, true);
305 res = hstream; 305 res = hstream;
diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c
index 442500e06b7c..5676b849379d 100644
--- a/sound/hda/hdac_i915.c
+++ b/sound/hda/hdac_i915.c
@@ -56,8 +56,11 @@ int snd_hdac_display_power(struct hdac_bus *bus, bool enable)
56 enable ? "enable" : "disable"); 56 enable ? "enable" : "disable");
57 57
58 if (enable) { 58 if (enable) {
59 if (!bus->i915_power_refcount++) 59 if (!bus->i915_power_refcount++) {
60 acomp->ops->get_power(acomp->dev); 60 acomp->ops->get_power(acomp->dev);
61 snd_hdac_set_codec_wakeup(bus, true);
62 snd_hdac_set_codec_wakeup(bus, false);
63 }
61 } else { 64 } else {
62 WARN_ON(!bus->i915_power_refcount); 65 WARN_ON(!bus->i915_power_refcount);
63 if (!--bus->i915_power_refcount) 66 if (!--bus->i915_power_refcount)
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index ac0db1679f09..b077bb644434 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -5175,7 +5175,7 @@ static int alt_playback_pcm_open(struct hda_pcm_stream *hinfo,
5175 int err = 0; 5175 int err = 0;
5176 5176
5177 mutex_lock(&spec->pcm_mutex); 5177 mutex_lock(&spec->pcm_mutex);
5178 if (!spec->indep_hp_enabled) 5178 if (spec->indep_hp && !spec->indep_hp_enabled)
5179 err = -EBUSY; 5179 err = -EBUSY;
5180 else 5180 else
5181 spec->active_streams |= 1 << STREAM_INDEP_HP; 5181 spec->active_streams |= 1 << STREAM_INDEP_HP;
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 745535d1840a..c38c68f57938 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -867,7 +867,7 @@ static int azx_suspend(struct device *dev)
867 867
868 chip = card->private_data; 868 chip = card->private_data;
869 hda = container_of(chip, struct hda_intel, chip); 869 hda = container_of(chip, struct hda_intel, chip);
870 if (chip->disabled || hda->init_failed) 870 if (chip->disabled || hda->init_failed || !chip->running)
871 return 0; 871 return 0;
872 872
873 bus = azx_bus(chip); 873 bus = azx_bus(chip);
@@ -902,7 +902,7 @@ static int azx_resume(struct device *dev)
902 902
903 chip = card->private_data; 903 chip = card->private_data;
904 hda = container_of(chip, struct hda_intel, chip); 904 hda = container_of(chip, struct hda_intel, chip);
905 if (chip->disabled || hda->init_failed) 905 if (chip->disabled || hda->init_failed || !chip->running)
906 return 0; 906 return 0;
907 907
908 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL 908 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL
@@ -979,14 +979,16 @@ static int azx_runtime_resume(struct device *dev)
979 if (!azx_has_pm_runtime(chip)) 979 if (!azx_has_pm_runtime(chip))
980 return 0; 980 return 0;
981 981
982 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL 982 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
983 && hda->need_i915_power) { 983 bus = azx_bus(chip);
984 bus = azx_bus(chip); 984 if (hda->need_i915_power) {
985 snd_hdac_display_power(bus, true); 985 snd_hdac_display_power(bus, true);
986 haswell_set_bclk(hda); 986 haswell_set_bclk(hda);
987 /* toggle codec wakeup bit for STATESTS read */ 987 } else {
988 snd_hdac_set_codec_wakeup(bus, true); 988 /* toggle codec wakeup bit for STATESTS read */
989 snd_hdac_set_codec_wakeup(bus, false); 989 snd_hdac_set_codec_wakeup(bus, true);
990 snd_hdac_set_codec_wakeup(bus, false);
991 }
990 } 992 }
991 993
992 /* Read STATESTS before controller reset */ 994 /* Read STATESTS before controller reset */
@@ -1025,7 +1027,7 @@ static int azx_runtime_idle(struct device *dev)
1025 return 0; 1027 return 0;
1026 1028
1027 if (!power_save_controller || !azx_has_pm_runtime(chip) || 1029 if (!power_save_controller || !azx_has_pm_runtime(chip) ||
1028 azx_bus(chip)->codec_powered) 1030 azx_bus(chip)->codec_powered || !chip->running)
1029 return -EBUSY; 1031 return -EBUSY;
1030 1032
1031 return 0; 1033 return 0;
@@ -2182,6 +2184,8 @@ static const struct pci_device_id azx_ids[] = {
2182 /* ATI HDMI */ 2184 /* ATI HDMI */
2183 { PCI_DEVICE(0x1002, 0x1308), 2185 { PCI_DEVICE(0x1002, 0x1308),
2184 .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, 2186 .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
2187 { PCI_DEVICE(0x1002, 0x157a),
2188 .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
2185 { PCI_DEVICE(0x1002, 0x793b), 2189 { PCI_DEVICE(0x1002, 0x793b),
2186 .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI }, 2190 .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
2187 { PCI_DEVICE(0x1002, 0x7919), 2191 { PCI_DEVICE(0x1002, 0x7919),
@@ -2236,8 +2240,14 @@ static const struct pci_device_id azx_ids[] = {
2236 .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, 2240 .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
2237 { PCI_DEVICE(0x1002, 0xaab0), 2241 { PCI_DEVICE(0x1002, 0xaab0),
2238 .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, 2242 .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
2243 { PCI_DEVICE(0x1002, 0xaac0),
2244 .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
2239 { PCI_DEVICE(0x1002, 0xaac8), 2245 { PCI_DEVICE(0x1002, 0xaac8),
2240 .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, 2246 .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
2247 { PCI_DEVICE(0x1002, 0xaad8),
2248 .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
2249 { PCI_DEVICE(0x1002, 0xaae8),
2250 .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
2241 /* VIA VT8251/VT8237A */ 2251 /* VIA VT8251/VT8237A */
2242 { PCI_DEVICE(0x1106, 0x3288), 2252 { PCI_DEVICE(0x1106, 0x3288),
2243 .driver_data = AZX_DRIVER_VIA | AZX_DCAPS_POSFIX_VIA }, 2253 .driver_data = AZX_DRIVER_VIA | AZX_DCAPS_POSFIX_VIA },
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
index 25ccf781fbe7..584a0343ab0c 100644
--- a/sound/pci/hda/patch_cirrus.c
+++ b/sound/pci/hda/patch_cirrus.c
@@ -999,9 +999,7 @@ static void cs4210_spdif_automute(struct hda_codec *codec,
999 999
1000 spec->spdif_present = spdif_present; 1000 spec->spdif_present = spdif_present;
1001 /* SPDIF TX on/off */ 1001 /* SPDIF TX on/off */
1002 if (spdif_present) 1002 snd_hda_set_pin_ctl(codec, spdif_pin, spdif_present ? PIN_OUT : 0);
1003 snd_hda_set_pin_ctl(codec, spdif_pin,
1004 spdif_present ? PIN_OUT : 0);
1005 1003
1006 cs_automute(codec); 1004 cs_automute(codec);
1007} 1005}
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 2f2433845d04..a97db5fc8a15 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -3512,6 +3512,7 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
3512{ .id = 0x10de0070, .name = "GPU 70 HDMI/DP", .patch = patch_nvhdmi }, 3512{ .id = 0x10de0070, .name = "GPU 70 HDMI/DP", .patch = patch_nvhdmi },
3513{ .id = 0x10de0071, .name = "GPU 71 HDMI/DP", .patch = patch_nvhdmi }, 3513{ .id = 0x10de0071, .name = "GPU 71 HDMI/DP", .patch = patch_nvhdmi },
3514{ .id = 0x10de0072, .name = "GPU 72 HDMI/DP", .patch = patch_nvhdmi }, 3514{ .id = 0x10de0072, .name = "GPU 72 HDMI/DP", .patch = patch_nvhdmi },
3515{ .id = 0x10de007d, .name = "GPU 7d HDMI/DP", .patch = patch_nvhdmi },
3515{ .id = 0x10de8001, .name = "MCP73 HDMI", .patch = patch_nvhdmi_2ch }, 3516{ .id = 0x10de8001, .name = "MCP73 HDMI", .patch = patch_nvhdmi_2ch },
3516{ .id = 0x11069f80, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi }, 3517{ .id = 0x11069f80, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi },
3517{ .id = 0x11069f81, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi }, 3518{ .id = 0x11069f81, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi },
@@ -3527,6 +3528,7 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
3527{ .id = 0x80862807, .name = "Haswell HDMI", .patch = patch_generic_hdmi }, 3528{ .id = 0x80862807, .name = "Haswell HDMI", .patch = patch_generic_hdmi },
3528{ .id = 0x80862808, .name = "Broadwell HDMI", .patch = patch_generic_hdmi }, 3529{ .id = 0x80862808, .name = "Broadwell HDMI", .patch = patch_generic_hdmi },
3529{ .id = 0x80862809, .name = "Skylake HDMI", .patch = patch_generic_hdmi }, 3530{ .id = 0x80862809, .name = "Skylake HDMI", .patch = patch_generic_hdmi },
3531{ .id = 0x8086280a, .name = "Broxton HDMI", .patch = patch_generic_hdmi },
3530{ .id = 0x80862880, .name = "CedarTrail HDMI", .patch = patch_generic_hdmi }, 3532{ .id = 0x80862880, .name = "CedarTrail HDMI", .patch = patch_generic_hdmi },
3531{ .id = 0x80862882, .name = "Valleyview2 HDMI", .patch = patch_generic_hdmi }, 3533{ .id = 0x80862882, .name = "Valleyview2 HDMI", .patch = patch_generic_hdmi },
3532{ .id = 0x80862883, .name = "Braswell HDMI", .patch = patch_generic_hdmi }, 3534{ .id = 0x80862883, .name = "Braswell HDMI", .patch = patch_generic_hdmi },
@@ -3575,6 +3577,7 @@ MODULE_ALIAS("snd-hda-codec-id:10de0067");
3575MODULE_ALIAS("snd-hda-codec-id:10de0070"); 3577MODULE_ALIAS("snd-hda-codec-id:10de0070");
3576MODULE_ALIAS("snd-hda-codec-id:10de0071"); 3578MODULE_ALIAS("snd-hda-codec-id:10de0071");
3577MODULE_ALIAS("snd-hda-codec-id:10de0072"); 3579MODULE_ALIAS("snd-hda-codec-id:10de0072");
3580MODULE_ALIAS("snd-hda-codec-id:10de007d");
3578MODULE_ALIAS("snd-hda-codec-id:10de8001"); 3581MODULE_ALIAS("snd-hda-codec-id:10de8001");
3579MODULE_ALIAS("snd-hda-codec-id:11069f80"); 3582MODULE_ALIAS("snd-hda-codec-id:11069f80");
3580MODULE_ALIAS("snd-hda-codec-id:11069f81"); 3583MODULE_ALIAS("snd-hda-codec-id:11069f81");
@@ -3591,6 +3594,7 @@ MODULE_ALIAS("snd-hda-codec-id:80862806");
3591MODULE_ALIAS("snd-hda-codec-id:80862807"); 3594MODULE_ALIAS("snd-hda-codec-id:80862807");
3592MODULE_ALIAS("snd-hda-codec-id:80862808"); 3595MODULE_ALIAS("snd-hda-codec-id:80862808");
3593MODULE_ALIAS("snd-hda-codec-id:80862809"); 3596MODULE_ALIAS("snd-hda-codec-id:80862809");
3597MODULE_ALIAS("snd-hda-codec-id:8086280a");
3594MODULE_ALIAS("snd-hda-codec-id:80862880"); 3598MODULE_ALIAS("snd-hda-codec-id:80862880");
3595MODULE_ALIAS("snd-hda-codec-id:80862882"); 3599MODULE_ALIAS("snd-hda-codec-id:80862882");
3596MODULE_ALIAS("snd-hda-codec-id:80862883"); 3600MODULE_ALIAS("snd-hda-codec-id:80862883");
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index b3b44681d3cf..374ea53288ca 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -2222,7 +2222,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
2222 SND_PCI_QUIRK(0x106b, 0x4300, "iMac 9,1", ALC889_FIXUP_IMAC91_VREF), 2222 SND_PCI_QUIRK(0x106b, 0x4300, "iMac 9,1", ALC889_FIXUP_IMAC91_VREF),
2223 SND_PCI_QUIRK(0x106b, 0x4600, "MacbookPro 5,2", ALC889_FIXUP_IMAC91_VREF), 2223 SND_PCI_QUIRK(0x106b, 0x4600, "MacbookPro 5,2", ALC889_FIXUP_IMAC91_VREF),
2224 SND_PCI_QUIRK(0x106b, 0x4900, "iMac 9,1 Aluminum", ALC889_FIXUP_IMAC91_VREF), 2224 SND_PCI_QUIRK(0x106b, 0x4900, "iMac 9,1 Aluminum", ALC889_FIXUP_IMAC91_VREF),
2225 SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_IMAC91_VREF), 2225 SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_MBA11_VREF),
2226 2226
2227 SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD), 2227 SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD),
2228 SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD), 2228 SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD),
@@ -4441,6 +4441,55 @@ static void alc290_fixup_mono_speakers(struct hda_codec *codec,
4441 } 4441 }
4442} 4442}
4443 4443
4444/* Hook to update amp GPIO4 for automute */
4445static void alc280_hp_gpio4_automute_hook(struct hda_codec *codec,
4446 struct hda_jack_callback *jack)
4447{
4448 struct alc_spec *spec = codec->spec;
4449
4450 snd_hda_gen_hp_automute(codec, jack);
4451 /* mute_led_polarity is set to 0, so we pass inverted value here */
4452 alc_update_gpio_led(codec, 0x10, !spec->gen.hp_jack_present);
4453}
4454
4455/* Manage GPIOs for HP EliteBook Folio 9480m.
4456 *
4457 * GPIO4 is the headphone amplifier power control
4458 * GPIO3 is the audio output mute indicator LED
4459 */
4460
4461static void alc280_fixup_hp_9480m(struct hda_codec *codec,
4462 const struct hda_fixup *fix,
4463 int action)
4464{
4465 struct alc_spec *spec = codec->spec;
4466 static const struct hda_verb gpio_init[] = {
4467 { 0x01, AC_VERB_SET_GPIO_MASK, 0x18 },
4468 { 0x01, AC_VERB_SET_GPIO_DIRECTION, 0x18 },
4469 {}
4470 };
4471
4472 if (action == HDA_FIXUP_ACT_PRE_PROBE) {
4473 /* Set the hooks to turn the headphone amp on/off
4474 * as needed
4475 */
4476 spec->gen.vmaster_mute.hook = alc_fixup_gpio_mute_hook;
4477 spec->gen.hp_automute_hook = alc280_hp_gpio4_automute_hook;
4478
4479 /* The GPIOs are currently off */
4480 spec->gpio_led = 0;
4481
4482 /* GPIO3 is connected to the output mute LED,
4483 * high is on, low is off
4484 */
4485 spec->mute_led_polarity = 0;
4486 spec->gpio_mute_led_mask = 0x08;
4487
4488 /* Initialize GPIO configuration */
4489 snd_hda_add_verbs(codec, gpio_init);
4490 }
4491}
4492
4444/* for hda_fixup_thinkpad_acpi() */ 4493/* for hda_fixup_thinkpad_acpi() */
4445#include "thinkpad_helper.c" 4494#include "thinkpad_helper.c"
4446 4495
@@ -4521,6 +4570,7 @@ enum {
4521 ALC286_FIXUP_HP_GPIO_LED, 4570 ALC286_FIXUP_HP_GPIO_LED,
4522 ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY, 4571 ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY,
4523 ALC280_FIXUP_HP_DOCK_PINS, 4572 ALC280_FIXUP_HP_DOCK_PINS,
4573 ALC280_FIXUP_HP_9480M,
4524 ALC288_FIXUP_DELL_HEADSET_MODE, 4574 ALC288_FIXUP_DELL_HEADSET_MODE,
4525 ALC288_FIXUP_DELL1_MIC_NO_PRESENCE, 4575 ALC288_FIXUP_DELL1_MIC_NO_PRESENCE,
4526 ALC288_FIXUP_DELL_XPS_13_GPIO6, 4576 ALC288_FIXUP_DELL_XPS_13_GPIO6,
@@ -5011,7 +5061,7 @@ static const struct hda_fixup alc269_fixups[] = {
5011 { 0x14, 0x90170110 }, 5061 { 0x14, 0x90170110 },
5012 { 0x17, 0x40000008 }, 5062 { 0x17, 0x40000008 },
5013 { 0x18, 0x411111f0 }, 5063 { 0x18, 0x411111f0 },
5014 { 0x19, 0x411111f0 }, 5064 { 0x19, 0x01a1913c },
5015 { 0x1a, 0x411111f0 }, 5065 { 0x1a, 0x411111f0 },
5016 { 0x1b, 0x411111f0 }, 5066 { 0x1b, 0x411111f0 },
5017 { 0x1d, 0x40f89b2d }, 5067 { 0x1d, 0x40f89b2d },
@@ -5043,6 +5093,10 @@ static const struct hda_fixup alc269_fixups[] = {
5043 .chained = true, 5093 .chained = true,
5044 .chain_id = ALC280_FIXUP_HP_GPIO4 5094 .chain_id = ALC280_FIXUP_HP_GPIO4
5045 }, 5095 },
5096 [ALC280_FIXUP_HP_9480M] = {
5097 .type = HDA_FIXUP_FUNC,
5098 .v.func = alc280_fixup_hp_9480m,
5099 },
5046 [ALC288_FIXUP_DELL_HEADSET_MODE] = { 5100 [ALC288_FIXUP_DELL_HEADSET_MODE] = {
5047 .type = HDA_FIXUP_FUNC, 5101 .type = HDA_FIXUP_FUNC,
5048 .v.func = alc_fixup_headset_mode_dell_alc288, 5102 .v.func = alc_fixup_headset_mode_dell_alc288,
@@ -5131,9 +5185,12 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5131 SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 5185 SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
5132 SND_PCI_QUIRK(0x1028, 0x064b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 5186 SND_PCI_QUIRK(0x1028, 0x064b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
5133 SND_PCI_QUIRK(0x1028, 0x0665, "Dell XPS 13", ALC288_FIXUP_DELL_XPS_13), 5187 SND_PCI_QUIRK(0x1028, 0x0665, "Dell XPS 13", ALC288_FIXUP_DELL_XPS_13),
5188 SND_PCI_QUIRK(0x1028, 0x069a, "Dell Vostro 5480", ALC290_FIXUP_SUBWOOFER_HSJACK),
5134 SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), 5189 SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
5135 SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 5190 SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
5136 SND_PCI_QUIRK(0x1028, 0x06da, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 5191 SND_PCI_QUIRK(0x1028, 0x06da, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
5192 SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
5193 SND_PCI_QUIRK(0x1028, 0x06db, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
5137 SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 5194 SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
5138 SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 5195 SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
5139 SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), 5196 SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -5161,6 +5218,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5161 SND_PCI_QUIRK(0x103c, 0x22b7, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 5218 SND_PCI_QUIRK(0x103c, 0x22b7, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
5162 SND_PCI_QUIRK(0x103c, 0x22bf, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 5219 SND_PCI_QUIRK(0x103c, 0x22bf, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
5163 SND_PCI_QUIRK(0x103c, 0x22cf, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 5220 SND_PCI_QUIRK(0x103c, 0x22cf, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
5221 SND_PCI_QUIRK(0x103c, 0x22db, "HP", ALC280_FIXUP_HP_9480M),
5164 SND_PCI_QUIRK(0x103c, 0x22dc, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), 5222 SND_PCI_QUIRK(0x103c, 0x22dc, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
5165 SND_PCI_QUIRK(0x103c, 0x22fb, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), 5223 SND_PCI_QUIRK(0x103c, 0x22fb, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
5166 /* ALC290 */ 5224 /* ALC290 */
@@ -5234,6 +5292,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5234 SND_PCI_QUIRK(0x17aa, 0x220c, "Thinkpad T440s", ALC292_FIXUP_TPT440_DOCK), 5292 SND_PCI_QUIRK(0x17aa, 0x220c, "Thinkpad T440s", ALC292_FIXUP_TPT440_DOCK),
5235 SND_PCI_QUIRK(0x17aa, 0x220e, "Thinkpad T440p", ALC292_FIXUP_TPT440_DOCK), 5293 SND_PCI_QUIRK(0x17aa, 0x220e, "Thinkpad T440p", ALC292_FIXUP_TPT440_DOCK),
5236 SND_PCI_QUIRK(0x17aa, 0x2210, "Thinkpad T540p", ALC292_FIXUP_TPT440_DOCK), 5294 SND_PCI_QUIRK(0x17aa, 0x2210, "Thinkpad T540p", ALC292_FIXUP_TPT440_DOCK),
5295 SND_PCI_QUIRK(0x17aa, 0x2211, "Thinkpad W541", ALC292_FIXUP_TPT440_DOCK),
5237 SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad T440", ALC292_FIXUP_TPT440_DOCK), 5296 SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad T440", ALC292_FIXUP_TPT440_DOCK),
5238 SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad X240", ALC292_FIXUP_TPT440_DOCK), 5297 SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad X240", ALC292_FIXUP_TPT440_DOCK),
5239 SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 5298 SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
@@ -5343,8 +5402,6 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
5343 {0x19, 0x411111f0}, \ 5402 {0x19, 0x411111f0}, \
5344 {0x1a, 0x411111f0}, \ 5403 {0x1a, 0x411111f0}, \
5345 {0x1b, 0x411111f0}, \ 5404 {0x1b, 0x411111f0}, \
5346 {0x1d, 0x40700001}, \
5347 {0x1e, 0x411111f0}, \
5348 {0x21, 0x02211020} 5405 {0x21, 0x02211020}
5349 5406
5350#define ALC282_STANDARD_PINS \ 5407#define ALC282_STANDARD_PINS \
@@ -5375,8 +5432,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
5375 {0x15, 0x0221401f}, \ 5432 {0x15, 0x0221401f}, \
5376 {0x1a, 0x411111f0}, \ 5433 {0x1a, 0x411111f0}, \
5377 {0x1b, 0x411111f0}, \ 5434 {0x1b, 0x411111f0}, \
5378 {0x1d, 0x40700001}, \ 5435 {0x1d, 0x40700001}
5379 {0x1e, 0x411111f0}
5380 5436
5381#define ALC298_STANDARD_PINS \ 5437#define ALC298_STANDARD_PINS \
5382 {0x18, 0x411111f0}, \ 5438 {0x18, 0x411111f0}, \
@@ -5408,6 +5464,39 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
5408 {0x1d, 0x40700001}, 5464 {0x1d, 0x40700001},
5409 {0x21, 0x02211030}), 5465 {0x21, 0x02211030}),
5410 SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, 5466 SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5467 {0x12, 0x40000000},
5468 {0x14, 0x90170130},
5469 {0x17, 0x411111f0},
5470 {0x18, 0x411111f0},
5471 {0x19, 0x411111f0},
5472 {0x1a, 0x411111f0},
5473 {0x1b, 0x01014020},
5474 {0x1d, 0x4054c029},
5475 {0x1e, 0x411111f0},
5476 {0x21, 0x0221103f}),
5477 SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5478 {0x12, 0x40000000},
5479 {0x14, 0x90170150},
5480 {0x17, 0x411111f0},
5481 {0x18, 0x411111f0},
5482 {0x19, 0x411111f0},
5483 {0x1a, 0x411111f0},
5484 {0x1b, 0x02011020},
5485 {0x1d, 0x4054c029},
5486 {0x1e, 0x411111f0},
5487 {0x21, 0x0221105f}),
5488 SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5489 {0x12, 0x40000000},
5490 {0x14, 0x90170110},
5491 {0x17, 0x411111f0},
5492 {0x18, 0x411111f0},
5493 {0x19, 0x411111f0},
5494 {0x1a, 0x411111f0},
5495 {0x1b, 0x01014020},
5496 {0x1d, 0x4054c029},
5497 {0x1e, 0x411111f0},
5498 {0x21, 0x0221101f}),
5499 SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5411 {0x12, 0x90a60160}, 5500 {0x12, 0x90a60160},
5412 {0x14, 0x90170120}, 5501 {0x14, 0x90170120},
5413 {0x17, 0x90170140}, 5502 {0x17, 0x90170140},
@@ -5469,10 +5558,19 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
5469 {0x21, 0x02211030}), 5558 {0x21, 0x02211030}),
5470 SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, 5559 SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5471 ALC256_STANDARD_PINS, 5560 ALC256_STANDARD_PINS,
5472 {0x13, 0x40000000}), 5561 {0x13, 0x40000000},
5562 {0x1d, 0x40700001},
5563 {0x1e, 0x411111f0}),
5564 SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5565 ALC256_STANDARD_PINS,
5566 {0x13, 0x411111f0},
5567 {0x1d, 0x40700001},
5568 {0x1e, 0x411111f0}),
5473 SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, 5569 SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5474 ALC256_STANDARD_PINS, 5570 ALC256_STANDARD_PINS,
5475 {0x13, 0x411111f0}), 5571 {0x13, 0x411111f0},
5572 {0x1d, 0x4077992d},
5573 {0x1e, 0x411111ff}),
5476 SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4, 5574 SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4,
5477 {0x12, 0x90a60130}, 5575 {0x12, 0x90a60130},
5478 {0x13, 0x40000000}, 5576 {0x13, 0x40000000},
@@ -5635,35 +5733,48 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
5635 {0x13, 0x411111f0}, 5733 {0x13, 0x411111f0},
5636 {0x16, 0x01014020}, 5734 {0x16, 0x01014020},
5637 {0x18, 0x411111f0}, 5735 {0x18, 0x411111f0},
5638 {0x19, 0x01a19030}), 5736 {0x19, 0x01a19030},
5737 {0x1e, 0x411111f0}),
5639 SND_HDA_PIN_QUIRK(0x10ec0292, 0x1028, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE, 5738 SND_HDA_PIN_QUIRK(0x10ec0292, 0x1028, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE,
5640 ALC292_STANDARD_PINS, 5739 ALC292_STANDARD_PINS,
5641 {0x12, 0x90a60140}, 5740 {0x12, 0x90a60140},
5642 {0x13, 0x411111f0}, 5741 {0x13, 0x411111f0},
5643 {0x16, 0x01014020}, 5742 {0x16, 0x01014020},
5644 {0x18, 0x02a19031}, 5743 {0x18, 0x02a19031},
5645 {0x19, 0x01a1903e}), 5744 {0x19, 0x01a1903e},
5745 {0x1e, 0x411111f0}),
5646 SND_HDA_PIN_QUIRK(0x10ec0292, 0x1028, "Dell", ALC269_FIXUP_DELL3_MIC_NO_PRESENCE, 5746 SND_HDA_PIN_QUIRK(0x10ec0292, 0x1028, "Dell", ALC269_FIXUP_DELL3_MIC_NO_PRESENCE,
5647 ALC292_STANDARD_PINS, 5747 ALC292_STANDARD_PINS,
5648 {0x12, 0x90a60140}, 5748 {0x12, 0x90a60140},
5649 {0x13, 0x411111f0}, 5749 {0x13, 0x411111f0},
5650 {0x16, 0x411111f0}, 5750 {0x16, 0x411111f0},
5651 {0x18, 0x411111f0}, 5751 {0x18, 0x411111f0},
5652 {0x19, 0x411111f0}), 5752 {0x19, 0x411111f0},
5753 {0x1e, 0x411111f0}),
5653 SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE, 5754 SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
5654 ALC292_STANDARD_PINS, 5755 ALC292_STANDARD_PINS,
5655 {0x12, 0x40000000}, 5756 {0x12, 0x40000000},
5656 {0x13, 0x90a60140}, 5757 {0x13, 0x90a60140},
5657 {0x16, 0x21014020}, 5758 {0x16, 0x21014020},
5658 {0x18, 0x411111f0}, 5759 {0x18, 0x411111f0},
5659 {0x19, 0x21a19030}), 5760 {0x19, 0x21a19030},
5761 {0x1e, 0x411111f0}),
5660 SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE, 5762 SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
5661 ALC292_STANDARD_PINS, 5763 ALC292_STANDARD_PINS,
5662 {0x12, 0x40000000}, 5764 {0x12, 0x40000000},
5663 {0x13, 0x90a60140}, 5765 {0x13, 0x90a60140},
5664 {0x16, 0x411111f0}, 5766 {0x16, 0x411111f0},
5665 {0x18, 0x411111f0}, 5767 {0x18, 0x411111f0},
5666 {0x19, 0x411111f0}), 5768 {0x19, 0x411111f0},
5769 {0x1e, 0x411111f0}),
5770 SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
5771 ALC292_STANDARD_PINS,
5772 {0x12, 0x40000000},
5773 {0x13, 0x90a60140},
5774 {0x16, 0x21014020},
5775 {0x18, 0x411111f0},
5776 {0x19, 0x21a19030},
5777 {0x1e, 0x411111ff}),
5667 SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE, 5778 SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
5668 ALC298_STANDARD_PINS, 5779 ALC298_STANDARD_PINS,
5669 {0x12, 0x90a60130}, 5780 {0x12, 0x90a60130},
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index dcc7fe91244c..9d947aef2c8b 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -2920,7 +2920,8 @@ static const struct snd_pci_quirk stac92hd83xxx_fixup_tbl[] = {
2920 SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x148a, 2920 SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x148a,
2921 "HP Mini", STAC_92HD83XXX_HP_LED), 2921 "HP Mini", STAC_92HD83XXX_HP_LED),
2922 SND_PCI_QUIRK_VENDOR(PCI_VENDOR_ID_HP, "HP", STAC_92HD83XXX_HP), 2922 SND_PCI_QUIRK_VENDOR(PCI_VENDOR_ID_HP, "HP", STAC_92HD83XXX_HP),
2923 SND_PCI_QUIRK(PCI_VENDOR_ID_TOSHIBA, 0xfa91, 2923 /* match both for 0xfa91 and 0xfa93 */
2924 SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_TOSHIBA, 0xfffd, 0xfa91,
2924 "Toshiba Satellite S50D", STAC_92HD83XXX_GPIO10_EAPD), 2925 "Toshiba Satellite S50D", STAC_92HD83XXX_GPIO10_EAPD),
2925 {} /* terminator */ 2926 {} /* terminator */
2926}; 2927};
diff --git a/sound/pci/oxygen/oxygen_mixer.c b/sound/pci/oxygen/oxygen_mixer.c
index 6492bca8c70f..4ca12665ff73 100644
--- a/sound/pci/oxygen/oxygen_mixer.c
+++ b/sound/pci/oxygen/oxygen_mixer.c
@@ -88,7 +88,7 @@ static int dac_mute_put(struct snd_kcontrol *ctl,
88 int changed; 88 int changed;
89 89
90 mutex_lock(&chip->mutex); 90 mutex_lock(&chip->mutex);
91 changed = !value->value.integer.value[0] != chip->dac_mute; 91 changed = (!value->value.integer.value[0]) != chip->dac_mute;
92 if (changed) { 92 if (changed) {
93 chip->dac_mute = !value->value.integer.value[0]; 93 chip->dac_mute = !value->value.integer.value[0];
94 chip->model.update_dac_mute(chip); 94 chip->model.update_dac_mute(chip);
diff --git a/sound/soc/Kconfig b/sound/soc/Kconfig
index 2ae9619443d1..1d651b8a8957 100644
--- a/sound/soc/Kconfig
+++ b/sound/soc/Kconfig
@@ -30,6 +30,9 @@ config SND_SOC_GENERIC_DMAENGINE_PCM
30 bool 30 bool
31 select SND_DMAENGINE_PCM 31 select SND_DMAENGINE_PCM
32 32
33config SND_SOC_TOPOLOGY
34 bool
35
33# All the supported SoCs 36# All the supported SoCs
34source "sound/soc/adi/Kconfig" 37source "sound/soc/adi/Kconfig"
35source "sound/soc/atmel/Kconfig" 38source "sound/soc/atmel/Kconfig"
diff --git a/sound/soc/Makefile b/sound/soc/Makefile
index e189903fabf4..669648b41d30 100644
--- a/sound/soc/Makefile
+++ b/sound/soc/Makefile
@@ -1,6 +1,9 @@
1snd-soc-core-objs := soc-core.o soc-dapm.o soc-jack.o soc-cache.o soc-utils.o 1snd-soc-core-objs := soc-core.o soc-dapm.o soc-jack.o soc-cache.o soc-utils.o
2snd-soc-core-objs += soc-pcm.o soc-compress.o soc-io.o soc-devres.o soc-ops.o 2snd-soc-core-objs += soc-pcm.o soc-compress.o soc-io.o soc-devres.o soc-ops.o
3
4ifneq ($(CONFIG_SND_SOC_TOPOLOGY),)
3snd-soc-core-objs += soc-topology.o 5snd-soc-core-objs += soc-topology.o
6endif
4 7
5ifneq ($(CONFIG_SND_SOC_GENERIC_DMAENGINE_PCM),) 8ifneq ($(CONFIG_SND_SOC_GENERIC_DMAENGINE_PCM),)
6snd-soc-core-objs += soc-generic-dmaengine-pcm.o 9snd-soc-core-objs += soc-generic-dmaengine-pcm.o
diff --git a/sound/soc/codecs/cs4265.c b/sound/soc/codecs/cs4265.c
index d7ec4756e45b..8e36198474d9 100644
--- a/sound/soc/codecs/cs4265.c
+++ b/sound/soc/codecs/cs4265.c
@@ -457,14 +457,14 @@ static int cs4265_pcm_hw_params(struct snd_pcm_substream *substream,
457 case SND_SOC_DAIFMT_RIGHT_J: 457 case SND_SOC_DAIFMT_RIGHT_J:
458 if (params_width(params) == 16) { 458 if (params_width(params) == 16) {
459 snd_soc_update_bits(codec, CS4265_DAC_CTL, 459 snd_soc_update_bits(codec, CS4265_DAC_CTL,
460 CS4265_DAC_CTL_DIF, (1 << 5)); 460 CS4265_DAC_CTL_DIF, (2 << 4));
461 snd_soc_update_bits(codec, CS4265_SPDIF_CTL2, 461 snd_soc_update_bits(codec, CS4265_SPDIF_CTL2,
462 CS4265_SPDIF_CTL2_DIF, (1 << 7)); 462 CS4265_SPDIF_CTL2_DIF, (2 << 6));
463 } else { 463 } else {
464 snd_soc_update_bits(codec, CS4265_DAC_CTL, 464 snd_soc_update_bits(codec, CS4265_DAC_CTL,
465 CS4265_DAC_CTL_DIF, (3 << 5)); 465 CS4265_DAC_CTL_DIF, (3 << 4));
466 snd_soc_update_bits(codec, CS4265_SPDIF_CTL2, 466 snd_soc_update_bits(codec, CS4265_SPDIF_CTL2,
467 CS4265_SPDIF_CTL2_DIF, (1 << 7)); 467 CS4265_SPDIF_CTL2_DIF, (3 << 6));
468 } 468 }
469 break; 469 break;
470 case SND_SOC_DAIFMT_LEFT_J: 470 case SND_SOC_DAIFMT_LEFT_J:
@@ -473,7 +473,7 @@ static int cs4265_pcm_hw_params(struct snd_pcm_substream *substream,
473 snd_soc_update_bits(codec, CS4265_ADC_CTL, 473 snd_soc_update_bits(codec, CS4265_ADC_CTL,
474 CS4265_ADC_DIF, 0); 474 CS4265_ADC_DIF, 0);
475 snd_soc_update_bits(codec, CS4265_SPDIF_CTL2, 475 snd_soc_update_bits(codec, CS4265_SPDIF_CTL2,
476 CS4265_SPDIF_CTL2_DIF, (1 << 6)); 476 CS4265_SPDIF_CTL2_DIF, 0);
477 477
478 break; 478 break;
479 default: 479 default:
diff --git a/sound/soc/codecs/pcm1681.c b/sound/soc/codecs/pcm1681.c
index 477e13d30971..e7ba557979cb 100644
--- a/sound/soc/codecs/pcm1681.c
+++ b/sound/soc/codecs/pcm1681.c
@@ -102,7 +102,7 @@ static int pcm1681_set_deemph(struct snd_soc_codec *codec)
102 102
103 if (val != -1) { 103 if (val != -1) {
104 regmap_update_bits(priv->regmap, PCM1681_DEEMPH_CONTROL, 104 regmap_update_bits(priv->regmap, PCM1681_DEEMPH_CONTROL,
105 PCM1681_DEEMPH_RATE_MASK, val); 105 PCM1681_DEEMPH_RATE_MASK, val << 3);
106 enable = 1; 106 enable = 1;
107 } else 107 } else
108 enable = 0; 108 enable = 0;
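regmap_update_bits() masks the value it is given but does not shift it, and it preserves every bit outside the mask; the pcm1681 hunk above (and the ssm4567 hunk further down) depend on exactly that behaviour. A user-space model of the read-modify-write with a made-up 3-bit field, showing why the deemphasis value has to be shifted to its field position:

#include <assert.h>

/* Model of regmap_update_bits(): new = (old & ~mask) | (val & mask).
 * Bits outside @mask survive, and @val is used exactly as passed in. */
static unsigned int update_bits(unsigned int old, unsigned int mask,
				unsigned int val)
{
	return (old & ~mask) | (val & mask);
}

int main(void)
{
	unsigned int reg  = 0x81;      /* unrelated bits already programmed */
	unsigned int mask = 0x7 << 3;  /* hypothetical 3-bit field at bit 3 */
	unsigned int val  = 0x5;       /* field value to store */

	/* The raw value is mostly thrown away by the mask... */
	assert(update_bits(reg, mask, val) == 0x81);
	/* ...shifting it into the field stores it while the bits outside
	 * the field (0x81) are left untouched. */
	assert(update_bits(reg, mask, val << 3) == (0x81u | (0x5u << 3)));
	return 0;
}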
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index 9ce311e088fc..961bd7e5877e 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -2943,6 +2943,9 @@ static int rt5645_irq_detection(struct rt5645_priv *rt5645)
2943{ 2943{
2944 int val, btn_type, gpio_state = 0, report = 0; 2944 int val, btn_type, gpio_state = 0, report = 0;
2945 2945
2946 if (!rt5645->codec)
2947 return -EINVAL;
2948
2946 switch (rt5645->pdata.jd_mode) { 2949 switch (rt5645->pdata.jd_mode) {
2947 case 0: /* Not using rt5645 JD */ 2950 case 0: /* Not using rt5645 JD */
2948 if (rt5645->gpiod_hp_det) { 2951 if (rt5645->gpiod_hp_det) {
@@ -3338,6 +3341,8 @@ static int rt5645_i2c_probe(struct i2c_client *i2c,
3338 break; 3341 break;
3339 3342
3340 case RT5645_DMIC_DATA_GPIO5: 3343 case RT5645_DMIC_DATA_GPIO5:
3344 regmap_update_bits(rt5645->regmap, RT5645_GPIO_CTRL1,
3345 RT5645_I2S2_DAC_PIN_MASK, RT5645_I2S2_DAC_PIN_GPIO);
3341 regmap_update_bits(rt5645->regmap, RT5645_DMIC_CTRL1, 3346 regmap_update_bits(rt5645->regmap, RT5645_DMIC_CTRL1,
3342 RT5645_DMIC_1_DP_MASK, RT5645_DMIC_1_DP_GPIO5); 3347 RT5645_DMIC_1_DP_MASK, RT5645_DMIC_1_DP_GPIO5);
3343 regmap_update_bits(rt5645->regmap, RT5645_GPIO_CTRL1, 3348 regmap_update_bits(rt5645->regmap, RT5645_GPIO_CTRL1,
diff --git a/sound/soc/codecs/rt5645.h b/sound/soc/codecs/rt5645.h
index 0353a6a273ab..278bb9f464c4 100644
--- a/sound/soc/codecs/rt5645.h
+++ b/sound/soc/codecs/rt5645.h
@@ -1693,6 +1693,10 @@
1693#define RT5645_GP6_PIN_SFT 6 1693#define RT5645_GP6_PIN_SFT 6
1694#define RT5645_GP6_PIN_GPIO6 (0x0 << 6) 1694#define RT5645_GP6_PIN_GPIO6 (0x0 << 6)
1695#define RT5645_GP6_PIN_DMIC2_SDA (0x1 << 6) 1695#define RT5645_GP6_PIN_DMIC2_SDA (0x1 << 6)
1696#define RT5645_I2S2_DAC_PIN_MASK (0x1 << 4)
1697#define RT5645_I2S2_DAC_PIN_SFT 4
1698#define RT5645_I2S2_DAC_PIN_I2S (0x0 << 4)
1699#define RT5645_I2S2_DAC_PIN_GPIO (0x1 << 4)
1696#define RT5645_GP8_PIN_MASK (0x1 << 3) 1700#define RT5645_GP8_PIN_MASK (0x1 << 3)
1697#define RT5645_GP8_PIN_SFT 3 1701#define RT5645_GP8_PIN_SFT 3
1698#define RT5645_GP8_PIN_GPIO8 (0x0 << 3) 1702#define RT5645_GP8_PIN_GPIO8 (0x0 << 3)
diff --git a/sound/soc/codecs/sgtl5000.h b/sound/soc/codecs/sgtl5000.h
index bd7a344bf8c5..1c317de26176 100644
--- a/sound/soc/codecs/sgtl5000.h
+++ b/sound/soc/codecs/sgtl5000.h
@@ -275,7 +275,7 @@
275#define SGTL5000_BIAS_CTRL_MASK 0x000e 275#define SGTL5000_BIAS_CTRL_MASK 0x000e
276#define SGTL5000_BIAS_CTRL_SHIFT 1 276#define SGTL5000_BIAS_CTRL_SHIFT 1
277#define SGTL5000_BIAS_CTRL_WIDTH 3 277#define SGTL5000_BIAS_CTRL_WIDTH 3
278#define SGTL5000_SMALL_POP 0 278#define SGTL5000_SMALL_POP 1
279 279
280/* 280/*
281 * SGTL5000_CHIP_MIC_CTRL 281 * SGTL5000_CHIP_MIC_CTRL
diff --git a/sound/soc/codecs/ssm4567.c b/sound/soc/codecs/ssm4567.c
index 938d2cb6d78b..84a4f5ad8064 100644
--- a/sound/soc/codecs/ssm4567.c
+++ b/sound/soc/codecs/ssm4567.c
@@ -315,7 +315,13 @@ static int ssm4567_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
315 if (invert_fclk) 315 if (invert_fclk)
316 ctrl1 |= SSM4567_SAI_CTRL_1_FSYNC; 316 ctrl1 |= SSM4567_SAI_CTRL_1_FSYNC;
317 317
318 return regmap_write(ssm4567->regmap, SSM4567_REG_SAI_CTRL_1, ctrl1); 318 return regmap_update_bits(ssm4567->regmap, SSM4567_REG_SAI_CTRL_1,
319 SSM4567_SAI_CTRL_1_BCLK |
320 SSM4567_SAI_CTRL_1_FSYNC |
321 SSM4567_SAI_CTRL_1_LJ |
322 SSM4567_SAI_CTRL_1_TDM |
323 SSM4567_SAI_CTRL_1_PDM,
324 ctrl1);
319} 325}
320 326
321static int ssm4567_set_power(struct ssm4567 *ssm4567, bool enable) 327static int ssm4567_set_power(struct ssm4567 *ssm4567, bool enable)
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index c7647e066cfd..c0b940e2019f 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -633,7 +633,7 @@ static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream,
633 sub *= 100000; 633 sub *= 100000;
634 do_div(sub, freq); 634 do_div(sub, freq);
635 635
636 if (sub < savesub) { 636 if (sub < savesub && !(i == 0 && psr == 0 && div2 == 0)) {
637 baudrate = tmprate; 637 baudrate = tmprate;
638 savesub = sub; 638 savesub = sub;
639 pm = i; 639 pm = i;
diff --git a/sound/soc/intel/Makefile b/sound/soc/intel/Makefile
index 3853ec2ddbc7..6de5d5cd3280 100644
--- a/sound/soc/intel/Makefile
+++ b/sound/soc/intel/Makefile
@@ -7,4 +7,4 @@ obj-$(CONFIG_SND_SOC_INTEL_BAYTRAIL) += baytrail/
7obj-$(CONFIG_SND_SST_MFLD_PLATFORM) += atom/ 7obj-$(CONFIG_SND_SST_MFLD_PLATFORM) += atom/
8 8
9# Machine support 9# Machine support
10obj-$(CONFIG_SND_SOC_INTEL_SST) += boards/ 10obj-$(CONFIG_SND_SOC) += boards/
diff --git a/sound/soc/intel/atom/sst/sst_drv_interface.c b/sound/soc/intel/atom/sst/sst_drv_interface.c
index 620da1d1b9e3..0e0e4d9c021f 100644
--- a/sound/soc/intel/atom/sst/sst_drv_interface.c
+++ b/sound/soc/intel/atom/sst/sst_drv_interface.c
@@ -42,6 +42,11 @@
42#define MIN_FRAGMENT_SIZE (50 * 1024) 42#define MIN_FRAGMENT_SIZE (50 * 1024)
43#define MAX_FRAGMENT_SIZE (1024 * 1024) 43#define MAX_FRAGMENT_SIZE (1024 * 1024)
44#define SST_GET_BYTES_PER_SAMPLE(pcm_wd_sz) (((pcm_wd_sz + 15) >> 4) << 1) 44#define SST_GET_BYTES_PER_SAMPLE(pcm_wd_sz) (((pcm_wd_sz + 15) >> 4) << 1)
45#ifdef CONFIG_PM
46#define GET_USAGE_COUNT(dev) (atomic_read(&dev->power.usage_count))
47#else
48#define GET_USAGE_COUNT(dev) 1
49#endif
45 50
46int free_stream_context(struct intel_sst_drv *ctx, unsigned int str_id) 51int free_stream_context(struct intel_sst_drv *ctx, unsigned int str_id)
47{ 52{
@@ -141,15 +146,9 @@ static int sst_power_control(struct device *dev, bool state)
141 int ret = 0; 146 int ret = 0;
142 int usage_count = 0; 147 int usage_count = 0;
143 148
144#ifdef CONFIG_PM
145 usage_count = atomic_read(&dev->power.usage_count);
146#else
147 usage_count = 1;
148#endif
149
150 if (state == true) { 149 if (state == true) {
151 ret = pm_runtime_get_sync(dev); 150 ret = pm_runtime_get_sync(dev);
152 151 usage_count = GET_USAGE_COUNT(dev);
153 dev_dbg(ctx->dev, "Enable: pm usage count: %d\n", usage_count); 152 dev_dbg(ctx->dev, "Enable: pm usage count: %d\n", usage_count);
154 if (ret < 0) { 153 if (ret < 0) {
155 dev_err(ctx->dev, "Runtime get failed with err: %d\n", ret); 154 dev_err(ctx->dev, "Runtime get failed with err: %d\n", ret);
@@ -164,6 +163,7 @@ static int sst_power_control(struct device *dev, bool state)
164 } 163 }
165 } 164 }
166 } else { 165 } else {
166 usage_count = GET_USAGE_COUNT(dev);
167 dev_dbg(ctx->dev, "Disable: pm usage count: %d\n", usage_count); 167 dev_dbg(ctx->dev, "Disable: pm usage count: %d\n", usage_count);
168 return sst_pm_runtime_put(ctx); 168 return sst_pm_runtime_put(ctx);
169 } 169 }
diff --git a/sound/soc/intel/baytrail/sst-baytrail-ipc.c b/sound/soc/intel/baytrail/sst-baytrail-ipc.c
index 4c01bb43928d..5bbaa667bec1 100644
--- a/sound/soc/intel/baytrail/sst-baytrail-ipc.c
+++ b/sound/soc/intel/baytrail/sst-baytrail-ipc.c
@@ -701,6 +701,8 @@ int sst_byt_dsp_init(struct device *dev, struct sst_pdata *pdata)
701 if (byt == NULL) 701 if (byt == NULL)
702 return -ENOMEM; 702 return -ENOMEM;
703 703
704 byt->dev = dev;
705
704 ipc = &byt->ipc; 706 ipc = &byt->ipc;
705 ipc->dev = dev; 707 ipc->dev = dev;
706 ipc->ops.tx_msg = byt_tx_msg; 708 ipc->ops.tx_msg = byt_tx_msg;
diff --git a/sound/soc/intel/boards/cht_bsw_max98090_ti.c b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
index d604ee80eda4..70f832114a5a 100644
--- a/sound/soc/intel/boards/cht_bsw_max98090_ti.c
+++ b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
@@ -69,12 +69,12 @@ static const struct snd_soc_dapm_route cht_audio_map[] = {
69 {"Headphone", NULL, "HPR"}, 69 {"Headphone", NULL, "HPR"},
70 {"Ext Spk", NULL, "SPKL"}, 70 {"Ext Spk", NULL, "SPKL"},
71 {"Ext Spk", NULL, "SPKR"}, 71 {"Ext Spk", NULL, "SPKR"},
72 {"AIF1 Playback", NULL, "ssp2 Tx"}, 72 {"HiFi Playback", NULL, "ssp2 Tx"},
73 {"ssp2 Tx", NULL, "codec_out0"}, 73 {"ssp2 Tx", NULL, "codec_out0"},
74 {"ssp2 Tx", NULL, "codec_out1"}, 74 {"ssp2 Tx", NULL, "codec_out1"},
75 {"codec_in0", NULL, "ssp2 Rx" }, 75 {"codec_in0", NULL, "ssp2 Rx" },
76 {"codec_in1", NULL, "ssp2 Rx" }, 76 {"codec_in1", NULL, "ssp2 Rx" },
77 {"ssp2 Rx", NULL, "AIF1 Capture"}, 77 {"ssp2 Rx", NULL, "HiFi Capture"},
78}; 78};
79 79
80static const struct snd_kcontrol_new cht_mc_controls[] = { 80static const struct snd_kcontrol_new cht_mc_controls[] = {
diff --git a/sound/soc/intel/haswell/sst-haswell-ipc.c b/sound/soc/intel/haswell/sst-haswell-ipc.c
index f95f271aab0c..f6efa9d4acad 100644
--- a/sound/soc/intel/haswell/sst-haswell-ipc.c
+++ b/sound/soc/intel/haswell/sst-haswell-ipc.c
@@ -2119,6 +2119,8 @@ int sst_hsw_dsp_init(struct device *dev, struct sst_pdata *pdata)
2119 if (hsw == NULL) 2119 if (hsw == NULL)
2120 return -ENOMEM; 2120 return -ENOMEM;
2121 2121
2122 hsw->dev = dev;
2123
2122 ipc = &hsw->ipc; 2124 ipc = &hsw->ipc;
2123 ipc->dev = dev; 2125 ipc->dev = dev;
2124 ipc->ops.tx_msg = hsw_tx_msg; 2126 ipc->ops.tx_msg = hsw_tx_msg;
diff --git a/sound/soc/mediatek/mt8173-max98090.c b/sound/soc/mediatek/mt8173-max98090.c
index 4d44b5803e55..2d2536af141f 100644
--- a/sound/soc/mediatek/mt8173-max98090.c
+++ b/sound/soc/mediatek/mt8173-max98090.c
@@ -103,7 +103,6 @@ static struct snd_soc_dai_link mt8173_max98090_dais[] = {
103 .name = "MAX98090 Playback", 103 .name = "MAX98090 Playback",
104 .stream_name = "MAX98090 Playback", 104 .stream_name = "MAX98090 Playback",
105 .cpu_dai_name = "DL1", 105 .cpu_dai_name = "DL1",
106 .platform_name = "11220000.mt8173-afe-pcm",
107 .codec_name = "snd-soc-dummy", 106 .codec_name = "snd-soc-dummy",
108 .codec_dai_name = "snd-soc-dummy-dai", 107 .codec_dai_name = "snd-soc-dummy-dai",
109 .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, 108 .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
@@ -114,7 +113,6 @@ static struct snd_soc_dai_link mt8173_max98090_dais[] = {
114 .name = "MAX98090 Capture", 113 .name = "MAX98090 Capture",
115 .stream_name = "MAX98090 Capture", 114 .stream_name = "MAX98090 Capture",
116 .cpu_dai_name = "VUL", 115 .cpu_dai_name = "VUL",
117 .platform_name = "11220000.mt8173-afe-pcm",
118 .codec_name = "snd-soc-dummy", 116 .codec_name = "snd-soc-dummy",
119 .codec_dai_name = "snd-soc-dummy-dai", 117 .codec_dai_name = "snd-soc-dummy-dai",
120 .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, 118 .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
@@ -125,7 +123,6 @@ static struct snd_soc_dai_link mt8173_max98090_dais[] = {
125 { 123 {
126 .name = "Codec", 124 .name = "Codec",
127 .cpu_dai_name = "I2S", 125 .cpu_dai_name = "I2S",
128 .platform_name = "11220000.mt8173-afe-pcm",
129 .no_pcm = 1, 126 .no_pcm = 1,
130 .codec_dai_name = "HiFi", 127 .codec_dai_name = "HiFi",
131 .init = mt8173_max98090_init, 128 .init = mt8173_max98090_init,
@@ -152,9 +149,21 @@ static struct snd_soc_card mt8173_max98090_card = {
152static int mt8173_max98090_dev_probe(struct platform_device *pdev) 149static int mt8173_max98090_dev_probe(struct platform_device *pdev)
153{ 150{
154 struct snd_soc_card *card = &mt8173_max98090_card; 151 struct snd_soc_card *card = &mt8173_max98090_card;
155 struct device_node *codec_node; 152 struct device_node *codec_node, *platform_node;
156 int ret, i; 153 int ret, i;
157 154
155 platform_node = of_parse_phandle(pdev->dev.of_node,
156 "mediatek,platform", 0);
157 if (!platform_node) {
158 dev_err(&pdev->dev, "Property 'platform' missing or invalid\n");
159 return -EINVAL;
160 }
161 for (i = 0; i < card->num_links; i++) {
162 if (mt8173_max98090_dais[i].platform_name)
163 continue;
164 mt8173_max98090_dais[i].platform_of_node = platform_node;
165 }
166
158 codec_node = of_parse_phandle(pdev->dev.of_node, 167 codec_node = of_parse_phandle(pdev->dev.of_node,
159 "mediatek,audio-codec", 0); 168 "mediatek,audio-codec", 0);
160 if (!codec_node) { 169 if (!codec_node) {
diff --git a/sound/soc/mediatek/mt8173-rt5650-rt5676.c b/sound/soc/mediatek/mt8173-rt5650-rt5676.c
index 094055323059..6f52eca05e26 100644
--- a/sound/soc/mediatek/mt8173-rt5650-rt5676.c
+++ b/sound/soc/mediatek/mt8173-rt5650-rt5676.c
@@ -138,7 +138,6 @@ static struct snd_soc_dai_link mt8173_rt5650_rt5676_dais[] = {
138 .name = "rt5650_rt5676 Playback", 138 .name = "rt5650_rt5676 Playback",
139 .stream_name = "rt5650_rt5676 Playback", 139 .stream_name = "rt5650_rt5676 Playback",
140 .cpu_dai_name = "DL1", 140 .cpu_dai_name = "DL1",
141 .platform_name = "11220000.mt8173-afe-pcm",
142 .codec_name = "snd-soc-dummy", 141 .codec_name = "snd-soc-dummy",
143 .codec_dai_name = "snd-soc-dummy-dai", 142 .codec_dai_name = "snd-soc-dummy-dai",
144 .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, 143 .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
@@ -149,7 +148,6 @@ static struct snd_soc_dai_link mt8173_rt5650_rt5676_dais[] = {
149 .name = "rt5650_rt5676 Capture", 148 .name = "rt5650_rt5676 Capture",
150 .stream_name = "rt5650_rt5676 Capture", 149 .stream_name = "rt5650_rt5676 Capture",
151 .cpu_dai_name = "VUL", 150 .cpu_dai_name = "VUL",
152 .platform_name = "11220000.mt8173-afe-pcm",
153 .codec_name = "snd-soc-dummy", 151 .codec_name = "snd-soc-dummy",
154 .codec_dai_name = "snd-soc-dummy-dai", 152 .codec_dai_name = "snd-soc-dummy-dai",
155 .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, 153 .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
@@ -161,7 +159,6 @@ static struct snd_soc_dai_link mt8173_rt5650_rt5676_dais[] = {
161 { 159 {
162 .name = "Codec", 160 .name = "Codec",
163 .cpu_dai_name = "I2S", 161 .cpu_dai_name = "I2S",
164 .platform_name = "11220000.mt8173-afe-pcm",
165 .no_pcm = 1, 162 .no_pcm = 1,
166 .codecs = mt8173_rt5650_rt5676_codecs, 163 .codecs = mt8173_rt5650_rt5676_codecs,
167 .num_codecs = 2, 164 .num_codecs = 2,
@@ -209,7 +206,21 @@ static struct snd_soc_card mt8173_rt5650_rt5676_card = {
209static int mt8173_rt5650_rt5676_dev_probe(struct platform_device *pdev) 206static int mt8173_rt5650_rt5676_dev_probe(struct platform_device *pdev)
210{ 207{
211 struct snd_soc_card *card = &mt8173_rt5650_rt5676_card; 208 struct snd_soc_card *card = &mt8173_rt5650_rt5676_card;
212 int ret; 209 struct device_node *platform_node;
210 int i, ret;
211
212 platform_node = of_parse_phandle(pdev->dev.of_node,
213 "mediatek,platform", 0);
214 if (!platform_node) {
215 dev_err(&pdev->dev, "Property 'platform' missing or invalid\n");
216 return -EINVAL;
217 }
218
219 for (i = 0; i < card->num_links; i++) {
220 if (mt8173_rt5650_rt5676_dais[i].platform_name)
221 continue;
222 mt8173_rt5650_rt5676_dais[i].platform_of_node = platform_node;
223 }
213 224
214 mt8173_rt5650_rt5676_codecs[0].of_node = 225 mt8173_rt5650_rt5676_codecs[0].of_node =
215 of_parse_phandle(pdev->dev.of_node, "mediatek,audio-codec", 0); 226 of_parse_phandle(pdev->dev.of_node, "mediatek,audio-codec", 0);
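Both MediaTek machine drivers above drop the hard-coded "11220000.mt8173-afe-pcm" platform name and instead resolve the AFE through a "mediatek,platform" phandle at probe time, filling in platform_of_node for every DAI link that does not name a platform explicitly. A condensed sketch of that lookup, written as a hypothetical helper rather than a complete driver:

#include <linux/of.h>
#include <linux/platform_device.h>
#include <sound/soc.h>

/* Hypothetical helper: point every DAI link that has no platform name at
 * the node referenced by the "mediatek,platform" phandle. */
static int set_platform_of_node(struct platform_device *pdev,
				struct snd_soc_dai_link *links, int num)
{
	struct device_node *platform_node;
	int i;

	platform_node = of_parse_phandle(pdev->dev.of_node,
					 "mediatek,platform", 0);
	if (!platform_node)
		return -EINVAL;

	for (i = 0; i < num; i++) {
		if (links[i].platform_name)
			continue;
		links[i].platform_of_node = platform_node;
	}
	return 0;
}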
diff --git a/sound/soc/mediatek/mtk-afe-pcm.c b/sound/soc/mediatek/mtk-afe-pcm.c
index cc228db5fb76..9863da73dfe0 100644
--- a/sound/soc/mediatek/mtk-afe-pcm.c
+++ b/sound/soc/mediatek/mtk-afe-pcm.c
@@ -1199,6 +1199,8 @@ err_pm_disable:
1199static int mtk_afe_pcm_dev_remove(struct platform_device *pdev) 1199static int mtk_afe_pcm_dev_remove(struct platform_device *pdev)
1200{ 1200{
1201 pm_runtime_disable(&pdev->dev); 1201 pm_runtime_disable(&pdev->dev);
1202 if (!pm_runtime_status_suspended(&pdev->dev))
1203 mtk_afe_runtime_suspend(&pdev->dev);
1202 snd_soc_unregister_component(&pdev->dev); 1204 snd_soc_unregister_component(&pdev->dev);
1203 snd_soc_unregister_platform(&pdev->dev); 1205 snd_soc_unregister_platform(&pdev->dev);
1204 return 0; 1206 return 0;
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 3a4a5c0e3f97..0e1e69c7abd5 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -1716,6 +1716,7 @@ card_probe_error:
1716 if (card->remove) 1716 if (card->remove)
1717 card->remove(card); 1717 card->remove(card);
1718 1718
1719 snd_soc_dapm_free(&card->dapm);
1719 soc_cleanup_card_debugfs(card); 1720 soc_cleanup_card_debugfs(card);
1720 snd_card_free(card->snd_card); 1721 snd_card_free(card->snd_card);
1721 1722
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index aa327c92480c..e0de8072c514 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -358,9 +358,10 @@ static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget,
358 data->widget = 358 data->widget =
359 snd_soc_dapm_new_control_unlocked(widget->dapm, 359 snd_soc_dapm_new_control_unlocked(widget->dapm,
360 &template); 360 &template);
361 kfree(name);
361 if (!data->widget) { 362 if (!data->widget) {
362 ret = -ENOMEM; 363 ret = -ENOMEM;
363 goto err_name; 364 goto err_data;
364 } 365 }
365 } 366 }
366 break; 367 break;
@@ -389,11 +390,12 @@ static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget,
389 390
390 data->value = template.on_val; 391 data->value = template.on_val;
391 392
392 data->widget = snd_soc_dapm_new_control(widget->dapm, 393 data->widget = snd_soc_dapm_new_control_unlocked(
393 &template); 394 widget->dapm, &template);
395 kfree(name);
394 if (!data->widget) { 396 if (!data->widget) {
395 ret = -ENOMEM; 397 ret = -ENOMEM;
396 goto err_name; 398 goto err_data;
397 } 399 }
398 400
399 snd_soc_dapm_add_path(widget->dapm, data->widget, 401 snd_soc_dapm_add_path(widget->dapm, data->widget,
@@ -408,8 +410,6 @@ static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget,
408 410
409 return 0; 411 return 0;
410 412
411err_name:
412 kfree(name);
413err_data: 413err_data:
414 kfree(data); 414 kfree(data);
415 return ret; 415 return ret;
@@ -418,8 +418,6 @@ err_data:
418static void dapm_kcontrol_free(struct snd_kcontrol *kctl) 418static void dapm_kcontrol_free(struct snd_kcontrol *kctl)
419{ 419{
420 struct dapm_kcontrol_data *data = snd_kcontrol_chip(kctl); 420 struct dapm_kcontrol_data *data = snd_kcontrol_chip(kctl);
421 if (data->widget)
422 kfree(data->widget->name);
423 kfree(data->wlist); 421 kfree(data->wlist);
424 kfree(data); 422 kfree(data);
425} 423}
@@ -1952,6 +1950,7 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
1952 size_t count, loff_t *ppos) 1950 size_t count, loff_t *ppos)
1953{ 1951{
1954 struct snd_soc_dapm_widget *w = file->private_data; 1952 struct snd_soc_dapm_widget *w = file->private_data;
1953 struct snd_soc_card *card = w->dapm->card;
1955 char *buf; 1954 char *buf;
1956 int in, out; 1955 int in, out;
1957 ssize_t ret; 1956 ssize_t ret;
@@ -1961,6 +1960,8 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
1961 if (!buf) 1960 if (!buf)
1962 return -ENOMEM; 1961 return -ENOMEM;
1963 1962
1963 mutex_lock(&card->dapm_mutex);
1964
1964 /* Supply widgets are not handled by is_connected_{input,output}_ep() */ 1965 /* Supply widgets are not handled by is_connected_{input,output}_ep() */
1965 if (w->is_supply) { 1966 if (w->is_supply) {
1966 in = 0; 1967 in = 0;
@@ -2007,6 +2008,8 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
2007 p->sink->name); 2008 p->sink->name);
2008 } 2009 }
2009 2010
2011 mutex_unlock(&card->dapm_mutex);
2012
2010 ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret); 2013 ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
2011 2014
2012 kfree(buf); 2015 kfree(buf);
@@ -2281,11 +2284,15 @@ static ssize_t dapm_widget_show(struct device *dev,
2281 struct snd_soc_pcm_runtime *rtd = dev_get_drvdata(dev); 2284 struct snd_soc_pcm_runtime *rtd = dev_get_drvdata(dev);
2282 int i, count = 0; 2285 int i, count = 0;
2283 2286
2287 mutex_lock(&rtd->card->dapm_mutex);
2288
2284 for (i = 0; i < rtd->num_codecs; i++) { 2289 for (i = 0; i < rtd->num_codecs; i++) {
2285 struct snd_soc_codec *codec = rtd->codec_dais[i]->codec; 2290 struct snd_soc_codec *codec = rtd->codec_dais[i]->codec;
2286 count += dapm_widget_show_codec(codec, buf + count); 2291 count += dapm_widget_show_codec(codec, buf + count);
2287 } 2292 }
2288 2293
2294 mutex_unlock(&rtd->card->dapm_mutex);
2295
2289 return count; 2296 return count;
2290} 2297}
2291 2298
@@ -3334,16 +3341,10 @@ snd_soc_dapm_new_control_unlocked(struct snd_soc_dapm_context *dapm,
3334 } 3341 }
3335 3342
3336 prefix = soc_dapm_prefix(dapm); 3343 prefix = soc_dapm_prefix(dapm);
3337 if (prefix) { 3344 if (prefix)
3338 w->name = kasprintf(GFP_KERNEL, "%s %s", prefix, widget->name); 3345 w->name = kasprintf(GFP_KERNEL, "%s %s", prefix, widget->name);
3339 if (widget->sname) 3346 else
3340 w->sname = kasprintf(GFP_KERNEL, "%s %s", prefix,
3341 widget->sname);
3342 } else {
3343 w->name = kasprintf(GFP_KERNEL, "%s", widget->name); 3347 w->name = kasprintf(GFP_KERNEL, "%s", widget->name);
3344 if (widget->sname)
3345 w->sname = kasprintf(GFP_KERNEL, "%s", widget->sname);
3346 }
3347 if (w->name == NULL) { 3348 if (w->name == NULL) {
3348 kfree(w); 3349 kfree(w);
3349 return NULL; 3350 return NULL;
@@ -3792,7 +3793,7 @@ int snd_soc_dapm_link_dai_widgets(struct snd_soc_card *card)
3792 break; 3793 break;
3793 } 3794 }
3794 3795
3795 if (!w->sname || !strstr(w->sname, dai_w->name)) 3796 if (!w->sname || !strstr(w->sname, dai_w->sname))
3796 continue; 3797 continue;
3797 3798
3798 if (dai_w->id == snd_soc_dapm_dai_in) { 3799 if (dai_w->id == snd_soc_dapm_dai_in) {
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index d0960683c409..31068b8f3db0 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -33,6 +33,7 @@
33#include <sound/soc.h> 33#include <sound/soc.h>
34#include <sound/soc-dapm.h> 34#include <sound/soc-dapm.h>
35#include <sound/soc-topology.h> 35#include <sound/soc-topology.h>
36#include <sound/tlv.h>
36 37
37/* 38/*
38 * We make several passes over the data (since it wont necessarily be ordered) 39 * We make several passes over the data (since it wont necessarily be ordered)
@@ -144,7 +145,7 @@ static const struct snd_soc_tplg_kcontrol_ops io_ops[] = {
144 {SND_SOC_TPLG_CTL_STROBE, snd_soc_get_strobe, 145 {SND_SOC_TPLG_CTL_STROBE, snd_soc_get_strobe,
145 snd_soc_put_strobe, NULL}, 146 snd_soc_put_strobe, NULL},
146 {SND_SOC_TPLG_DAPM_CTL_VOLSW, snd_soc_dapm_get_volsw, 147 {SND_SOC_TPLG_DAPM_CTL_VOLSW, snd_soc_dapm_get_volsw,
147 snd_soc_dapm_put_volsw, NULL}, 148 snd_soc_dapm_put_volsw, snd_soc_info_volsw},
148 {SND_SOC_TPLG_DAPM_CTL_ENUM_DOUBLE, snd_soc_dapm_get_enum_double, 149 {SND_SOC_TPLG_DAPM_CTL_ENUM_DOUBLE, snd_soc_dapm_get_enum_double,
149 snd_soc_dapm_put_enum_double, snd_soc_info_enum_double}, 150 snd_soc_dapm_put_enum_double, snd_soc_info_enum_double},
150 {SND_SOC_TPLG_DAPM_CTL_ENUM_VIRT, snd_soc_dapm_get_enum_double, 151 {SND_SOC_TPLG_DAPM_CTL_ENUM_VIRT, snd_soc_dapm_get_enum_double,
@@ -534,7 +535,7 @@ static int soc_tplg_kcontrol_bind_io(struct snd_soc_tplg_ctl_hdr *hdr,
534 k->put = bops[i].put; 535 k->put = bops[i].put;
535 if (k->get == NULL && bops[i].id == hdr->ops.get) 536 if (k->get == NULL && bops[i].id == hdr->ops.get)
536 k->get = bops[i].get; 537 k->get = bops[i].get;
537 if (k->info == NULL && ops[i].id == hdr->ops.info) 538 if (k->info == NULL && bops[i].id == hdr->ops.info)
538 k->info = bops[i].info; 539 k->info = bops[i].info;
539 } 540 }
540 541
@@ -579,29 +580,51 @@ static int soc_tplg_init_kcontrol(struct soc_tplg *tplg,
579 return 0; 580 return 0;
580} 581}
581 582
583
584static int soc_tplg_create_tlv_db_scale(struct soc_tplg *tplg,
585 struct snd_kcontrol_new *kc, struct snd_soc_tplg_tlv_dbscale *scale)
586{
587 unsigned int item_len = 2 * sizeof(unsigned int);
588 unsigned int *p;
589
590 p = kzalloc(item_len + 2 * sizeof(unsigned int), GFP_KERNEL);
591 if (!p)
592 return -ENOMEM;
593
594 p[0] = SNDRV_CTL_TLVT_DB_SCALE;
595 p[1] = item_len;
596 p[2] = scale->min;
597 p[3] = (scale->step & TLV_DB_SCALE_MASK)
598 | (scale->mute ? TLV_DB_SCALE_MUTE : 0);
599
600 kc->tlv.p = (void *)p;
601 return 0;
602}
603
582static int soc_tplg_create_tlv(struct soc_tplg *tplg, 604static int soc_tplg_create_tlv(struct soc_tplg *tplg,
583 struct snd_kcontrol_new *kc, u32 tlv_size) 605 struct snd_kcontrol_new *kc, struct snd_soc_tplg_ctl_hdr *tc)
584{ 606{
585 struct snd_soc_tplg_ctl_tlv *tplg_tlv; 607 struct snd_soc_tplg_ctl_tlv *tplg_tlv;
586 struct snd_ctl_tlv *tlv;
587 608
588 if (tlv_size == 0) 609 if (!(tc->access & SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE))
589 return 0; 610 return 0;
590 611
591 tplg_tlv = (struct snd_soc_tplg_ctl_tlv *) tplg->pos; 612 if (tc->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
592 tplg->pos += tlv_size; 613 kc->tlv.c = snd_soc_bytes_tlv_callback;
593 614 } else {
594 tlv = kzalloc(sizeof(*tlv) + tlv_size, GFP_KERNEL); 615 tplg_tlv = &tc->tlv;
595 if (tlv == NULL) 616 switch (tplg_tlv->type) {
596 return -ENOMEM; 617 case SNDRV_CTL_TLVT_DB_SCALE:
597 618 return soc_tplg_create_tlv_db_scale(tplg, kc,
598 dev_dbg(tplg->dev, " created TLV type %d size %d bytes\n", 619 &tplg_tlv->scale);
599 tplg_tlv->numid, tplg_tlv->size);
600 620
601 tlv->numid = tplg_tlv->numid; 621 /* TODO: add support for other TLV types */
602 tlv->length = tplg_tlv->size; 622 default:
603 memcpy(tlv->tlv, tplg_tlv + 1, tplg_tlv->size); 623 dev_dbg(tplg->dev, "Unsupported TLV type %d\n",
604 kc->tlv.p = (void *)tlv; 624 tplg_tlv->type);
625 return -EINVAL;
626 }
627 }
605 628
606 return 0; 629 return 0;
607} 630}
@@ -773,7 +796,7 @@ static int soc_tplg_dmixer_create(struct soc_tplg *tplg, unsigned int count,
773 } 796 }
774 797
775 /* create any TLV data */ 798 /* create any TLV data */
776 soc_tplg_create_tlv(tplg, &kc, mc->hdr.tlv_size); 799 soc_tplg_create_tlv(tplg, &kc, &mc->hdr);
777 800
778 /* register control here */ 801 /* register control here */
779 err = soc_tplg_add_kcontrol(tplg, &kc, 802 err = soc_tplg_add_kcontrol(tplg, &kc,
@@ -1351,6 +1374,7 @@ static int soc_tplg_dapm_widget_create(struct soc_tplg *tplg,
1351 template.reg = w->reg; 1374 template.reg = w->reg;
1352 template.shift = w->shift; 1375 template.shift = w->shift;
1353 template.mask = w->mask; 1376 template.mask = w->mask;
1377 template.subseq = w->subseq;
1354 template.on_val = w->invert ? 0 : 1; 1378 template.on_val = w->invert ? 0 : 1;
1355 template.off_val = w->invert ? 1 : 0; 1379 template.off_val = w->invert ? 1 : 0;
1356 template.ignore_suspend = w->ignore_suspend; 1380 template.ignore_suspend = w->ignore_suspend;
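soc_tplg_create_tlv_db_scale() above packs the topology's dB-scale description into the same four-word layout that DECLARE_TLV_DB_SCALE() produces for ordinary controls: type, payload length in bytes, minimum, then step with an optional mute flag (the dB numbers are conventionally in 0.01 dB units). A user-space sketch of that layout with illustrative values; the constants are copied here only for the example:

#include <stdio.h>

/* Mirrors the definitions in include/sound/tlv.h, for illustration only. */
#define SNDRV_CTL_TLVT_DB_SCALE	1
#define TLV_DB_SCALE_MASK	0xffff
#define TLV_DB_SCALE_MUTE	0x10000

int main(void)
{
	int min = -6000, step = 50, mute = 1;  /* -60.00 dB, 0.50 dB steps */

	unsigned int tlv[4] = {
		SNDRV_CTL_TLVT_DB_SCALE,		/* p[0]: type */
		2 * sizeof(unsigned int),		/* p[1]: payload length */
		(unsigned int)min,			/* p[2]: minimum */
		(step & TLV_DB_SCALE_MASK) |
			(mute ? TLV_DB_SCALE_MUTE : 0),	/* p[3]: step | mute */
	};

	printf("type=%u len=%u min=%d step/mute=%#x\n",
	       tlv[0], tlv[1], (int)tlv[2], tlv[3]);
	return 0;
}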
diff --git a/sound/soc/zte/zx296702-i2s.c b/sound/soc/zte/zx296702-i2s.c
index 98d96e1b17e0..1930c42e1f55 100644
--- a/sound/soc/zte/zx296702-i2s.c
+++ b/sound/soc/zte/zx296702-i2s.c
@@ -393,9 +393,9 @@ static int zx_i2s_probe(struct platform_device *pdev)
393 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 393 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
394 zx_i2s->mapbase = res->start; 394 zx_i2s->mapbase = res->start;
395 zx_i2s->reg_base = devm_ioremap_resource(&pdev->dev, res); 395 zx_i2s->reg_base = devm_ioremap_resource(&pdev->dev, res);
396 if (!zx_i2s->reg_base) { 396 if (IS_ERR(zx_i2s->reg_base)) {
397 dev_err(&pdev->dev, "ioremap failed!\n"); 397 dev_err(&pdev->dev, "ioremap failed!\n");
398 return -EIO; 398 return PTR_ERR(zx_i2s->reg_base);
399 } 399 }
400 400
401 writel_relaxed(0, zx_i2s->reg_base + ZX_I2S_FIFO_CTRL); 401 writel_relaxed(0, zx_i2s->reg_base + ZX_I2S_FIFO_CTRL);
diff --git a/sound/soc/zte/zx296702-spdif.c b/sound/soc/zte/zx296702-spdif.c
index 11a0e46a1156..26265ce4caca 100644
--- a/sound/soc/zte/zx296702-spdif.c
+++ b/sound/soc/zte/zx296702-spdif.c
@@ -322,9 +322,9 @@ static int zx_spdif_probe(struct platform_device *pdev)
322 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 322 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
323 zx_spdif->mapbase = res->start; 323 zx_spdif->mapbase = res->start;
324 zx_spdif->reg_base = devm_ioremap_resource(&pdev->dev, res); 324 zx_spdif->reg_base = devm_ioremap_resource(&pdev->dev, res);
325 if (!zx_spdif->reg_base) { 325 if (IS_ERR(zx_spdif->reg_base)) {
326 dev_err(&pdev->dev, "ioremap failed!\n"); 326 dev_err(&pdev->dev, "ioremap failed!\n");
327 return -EIO; 327 return PTR_ERR(zx_spdif->reg_base);
328 } 328 }
329 329
330 zx_spdif_dev_init(zx_spdif->reg_base); 330 zx_spdif_dev_init(zx_spdif->reg_base);
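The two ZTE hunks fix the same error-handling mistake: devm_ioremap_resource() does not return NULL on failure, it returns an ERR_PTR() value, so the result has to be tested with IS_ERR() and propagated with PTR_ERR(). A minimal probe-style sketch of the expected pattern (hypothetical driver, not from the patch):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))		/* never NULL; check for ERR_PTR() */
		return PTR_ERR(base);	/* e.g. -EBUSY or -ENOMEM */

	/* ... program the hardware through 'base' ... */
	return 0;
}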
diff --git a/sound/sparc/amd7930.c b/sound/sparc/amd7930.c
index 1b1a89e80d13..784ceb85b2d9 100644
--- a/sound/sparc/amd7930.c
+++ b/sound/sparc/amd7930.c
@@ -956,6 +956,7 @@ static int snd_amd7930_create(struct snd_card *card,
956 if (!amd->regs) { 956 if (!amd->regs) {
957 snd_printk(KERN_ERR 957 snd_printk(KERN_ERR
958 "amd7930-%d: Unable to map chip registers.\n", dev); 958 "amd7930-%d: Unable to map chip registers.\n", dev);
959 kfree(amd);
959 return -EIO; 960 return -EIO;
960 } 961 }
961 962
diff --git a/sound/usb/card.c b/sound/usb/card.c
index 1fab9778807a..0450593980fd 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -638,7 +638,7 @@ int snd_usb_autoresume(struct snd_usb_audio *chip)
638 int err = -ENODEV; 638 int err = -ENODEV;
639 639
640 down_read(&chip->shutdown_rwsem); 640 down_read(&chip->shutdown_rwsem);
641 if (chip->probing && chip->in_pm) 641 if (chip->probing || chip->in_pm)
642 err = 0; 642 err = 0;
643 else if (!chip->shutdown) 643 else if (!chip->shutdown)
644 err = usb_autopm_get_interface(chip->pm_intf); 644 err = usb_autopm_get_interface(chip->pm_intf);
diff --git a/sound/usb/line6/pcm.c b/sound/usb/line6/pcm.c
index 8461d6bf992f..204cc074adb9 100644
--- a/sound/usb/line6/pcm.c
+++ b/sound/usb/line6/pcm.c
@@ -186,12 +186,8 @@ static int line6_stream_start(struct snd_line6_pcm *line6pcm, int direction,
186 int ret = 0; 186 int ret = 0;
187 187
188 spin_lock_irqsave(&pstr->lock, flags); 188 spin_lock_irqsave(&pstr->lock, flags);
189 if (!test_and_set_bit(type, &pstr->running)) { 189 if (!test_and_set_bit(type, &pstr->running) &&
190 if (pstr->active_urbs || pstr->unlink_urbs) { 190 !(pstr->active_urbs || pstr->unlink_urbs)) {
191 ret = -EBUSY;
192 goto error;
193 }
194
195 pstr->count = 0; 191 pstr->count = 0;
196 /* Submit all currently available URBs */ 192 /* Submit all currently available URBs */
197 if (direction == SNDRV_PCM_STREAM_PLAYBACK) 193 if (direction == SNDRV_PCM_STREAM_PLAYBACK)
@@ -199,7 +195,6 @@ static int line6_stream_start(struct snd_line6_pcm *line6pcm, int direction,
199 else 195 else
200 ret = line6_submit_audio_in_all_urbs(line6pcm); 196 ret = line6_submit_audio_in_all_urbs(line6pcm);
201 } 197 }
202 error:
203 if (ret < 0) 198 if (ret < 0)
204 clear_bit(type, &pstr->running); 199 clear_bit(type, &pstr->running);
205 spin_unlock_irqrestore(&pstr->lock, flags); 200 spin_unlock_irqrestore(&pstr->lock, flags);
diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
index e5000da9e9d7..6a803eff87f7 100644
--- a/sound/usb/mixer_maps.c
+++ b/sound/usb/mixer_maps.c
@@ -341,6 +341,20 @@ static const struct usbmix_name_map scms_usb3318_map[] = {
341 { 0 } 341 { 0 }
342}; 342};
343 343
344/* Bose companion 5, the dB conversion factor is 16 instead of 256 */
345static struct usbmix_dB_map bose_companion5_dB = {-5006, -6};
346static struct usbmix_name_map bose_companion5_map[] = {
347 { 3, NULL, .dB = &bose_companion5_dB },
348 { 0 } /* terminator */
349};
350
351/* Dragonfly DAC 1.2, the dB conversion factor is 1 instead of 256 */
352static struct usbmix_dB_map dragonfly_1_2_dB = {0, 5000};
353static struct usbmix_name_map dragonfly_1_2_map[] = {
354 { 7, NULL, .dB = &dragonfly_1_2_dB },
355 { 0 } /* terminator */
356};
357
344/* 358/*
345 * Control map entries 359 * Control map entries
346 */ 360 */
@@ -451,6 +465,16 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = {
451 .id = USB_ID(0x25c4, 0x0003), 465 .id = USB_ID(0x25c4, 0x0003),
452 .map = scms_usb3318_map, 466 .map = scms_usb3318_map,
453 }, 467 },
468 {
469 /* Bose Companion 5 */
470 .id = USB_ID(0x05a7, 0x1020),
471 .map = bose_companion5_map,
472 },
473 {
474 /* Dragonfly DAC 1.2 */
475 .id = USB_ID(0x21b4, 0x0081),
476 .map = dragonfly_1_2_map,
477 },
454 { 0 } /* terminator */ 478 { 0 } /* terminator */
455}; 479};
456 480
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index 2f6d3e9a1bcd..e4756651a52c 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -2512,6 +2512,74 @@ YAMAHA_DEVICE(0x7010, "UB99"),
2512 } 2512 }
2513}, 2513},
2514 2514
2515/* Steinberg devices */
2516{
2517 /* Steinberg MI2 */
2518 USB_DEVICE_VENDOR_SPEC(0x0a4e, 0x2040),
2519 .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
2520 .ifnum = QUIRK_ANY_INTERFACE,
2521 .type = QUIRK_COMPOSITE,
2522 .data = & (const struct snd_usb_audio_quirk[]) {
2523 {
2524 .ifnum = 0,
2525 .type = QUIRK_AUDIO_STANDARD_INTERFACE
2526 },
2527 {
2528 .ifnum = 1,
2529 .type = QUIRK_AUDIO_STANDARD_INTERFACE
2530 },
2531 {
2532 .ifnum = 2,
2533 .type = QUIRK_AUDIO_STANDARD_INTERFACE
2534 },
2535 {
2536 .ifnum = 3,
2537 .type = QUIRK_MIDI_FIXED_ENDPOINT,
2538 .data = &(const struct snd_usb_midi_endpoint_info) {
2539 .out_cables = 0x0001,
2540 .in_cables = 0x0001
2541 }
2542 },
2543 {
2544 .ifnum = -1
2545 }
2546 }
2547 }
2548},
2549{
2550 /* Steinberg MI4 */
2551 USB_DEVICE_VENDOR_SPEC(0x0a4e, 0x4040),
2552 .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
2553 .ifnum = QUIRK_ANY_INTERFACE,
2554 .type = QUIRK_COMPOSITE,
2555 .data = & (const struct snd_usb_audio_quirk[]) {
2556 {
2557 .ifnum = 0,
2558 .type = QUIRK_AUDIO_STANDARD_INTERFACE
2559 },
2560 {
2561 .ifnum = 1,
2562 .type = QUIRK_AUDIO_STANDARD_INTERFACE
2563 },
2564 {
2565 .ifnum = 2,
2566 .type = QUIRK_AUDIO_STANDARD_INTERFACE
2567 },
2568 {
2569 .ifnum = 3,
2570 .type = QUIRK_MIDI_FIXED_ENDPOINT,
2571 .data = &(const struct snd_usb_midi_endpoint_info) {
2572 .out_cables = 0x0001,
2573 .in_cables = 0x0001
2574 }
2575 },
2576 {
2577 .ifnum = -1
2578 }
2579 }
2580 }
2581},
2582
2515/* TerraTec devices */ 2583/* TerraTec devices */
2516{ 2584{
2517 USB_DEVICE_VENDOR_SPEC(0x0ccd, 0x0012), 2585 USB_DEVICE_VENDOR_SPEC(0x0ccd, 0x0012),
diff --git a/tools/lib/api/Makefile b/tools/lib/api/Makefile
index 8bd960658463..fe1b02c2c95b 100644
--- a/tools/lib/api/Makefile
+++ b/tools/lib/api/Makefile
@@ -36,7 +36,7 @@ $(LIBFILE): $(API_IN)
36 36
37clean: 37clean:
38 $(call QUIET_CLEAN, libapi) $(RM) $(LIBFILE); \ 38 $(call QUIET_CLEAN, libapi) $(RM) $(LIBFILE); \
39 find $(if $(OUTPUT),$(OUTPUT),.) -name \*.o | xargs $(RM) 39 find $(if $(OUTPUT),$(OUTPUT),.) -name \*.o -or -name \*.o.cmd -or -name \*.o.d | xargs $(RM)
40 40
41FORCE: 41FORCE:
42 42
diff --git a/tools/lib/hweight.c b/tools/lib/hweight.c
new file mode 100644
index 000000000000..0b859b884339
--- /dev/null
+++ b/tools/lib/hweight.c
@@ -0,0 +1,62 @@
1#include <linux/bitops.h>
2#include <asm/types.h>
3
4/**
5 * hweightN - returns the hamming weight of a N-bit word
6 * @x: the word to weigh
7 *
8 * The Hamming Weight of a number is the total number of bits set in it.
9 */
10
11unsigned int __sw_hweight32(unsigned int w)
12{
13#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
14 w -= (w >> 1) & 0x55555555;
15 w = (w & 0x33333333) + ((w >> 2) & 0x33333333);
16 w = (w + (w >> 4)) & 0x0f0f0f0f;
17 return (w * 0x01010101) >> 24;
18#else
19 unsigned int res = w - ((w >> 1) & 0x55555555);
20 res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
21 res = (res + (res >> 4)) & 0x0F0F0F0F;
22 res = res + (res >> 8);
23 return (res + (res >> 16)) & 0x000000FF;
24#endif
25}
26
27unsigned int __sw_hweight16(unsigned int w)
28{
29 unsigned int res = w - ((w >> 1) & 0x5555);
30 res = (res & 0x3333) + ((res >> 2) & 0x3333);
31 res = (res + (res >> 4)) & 0x0F0F;
32 return (res + (res >> 8)) & 0x00FF;
33}
34
35unsigned int __sw_hweight8(unsigned int w)
36{
37 unsigned int res = w - ((w >> 1) & 0x55);
38 res = (res & 0x33) + ((res >> 2) & 0x33);
39 return (res + (res >> 4)) & 0x0F;
40}
41
42unsigned long __sw_hweight64(__u64 w)
43{
44#if BITS_PER_LONG == 32
45 return __sw_hweight32((unsigned int)(w >> 32)) +
46 __sw_hweight32((unsigned int)w);
47#elif BITS_PER_LONG == 64
48#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
49 w -= (w >> 1) & 0x5555555555555555ul;
50 w = (w & 0x3333333333333333ul) + ((w >> 2) & 0x3333333333333333ul);
51 w = (w + (w >> 4)) & 0x0f0f0f0f0f0f0f0ful;
52 return (w * 0x0101010101010101ul) >> 56;
53#else
54 __u64 res = w - ((w >> 1) & 0x5555555555555555ul);
55 res = (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul);
56 res = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Ful;
57 res = res + (res >> 8);
58 res = res + (res >> 16);
59 return (res + (res >> 32)) & 0x00000000000000FFul;
60#endif
61#endif
62}
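The new tools/lib/hweight.c is a local copy of the kernel's software population-count helpers; the MANIFEST, Build and python-ext-sources hunks that follow switch perf over to it so the build no longer reaches outside tools/ for lib/hweight.c. A standalone check of the generic 32-bit path (the same bit-parallel reduction as the #else branch above), runnable in user space:

#include <assert.h>

/* Same divide-and-conquer reduction as __sw_hweight32()'s generic path:
 * fold pairs of bits, then nibbles, bytes and half-words into a count. */
static unsigned int hweight32(unsigned int w)
{
	unsigned int res = w - ((w >> 1) & 0x55555555);

	res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
	res = (res + (res >> 4)) & 0x0F0F0F0F;
	res = res + (res >> 8);
	return (res + (res >> 16)) & 0x000000FF;
}

int main(void)
{
	assert(hweight32(0x00000000) == 0);
	assert(hweight32(0xF0F0F0F0) == 16);	/* sixteen bits set */
	assert(hweight32(0xFFFFFFFF) == 32);
	return 0;
}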
diff --git a/tools/lib/traceevent/Makefile b/tools/lib/traceevent/Makefile
index 6daaff652aff..7851df1490e0 100644
--- a/tools/lib/traceevent/Makefile
+++ b/tools/lib/traceevent/Makefile
@@ -268,7 +268,7 @@ install: install_lib
268 268
269clean: 269clean:
270 $(call QUIET_CLEAN, libtraceevent) \ 270 $(call QUIET_CLEAN, libtraceevent) \
271 $(RM) *.o *~ $(TARGETS) *.a *.so $(VERSION_FILES) .*.d \ 271 $(RM) *.o *~ $(TARGETS) *.a *.so $(VERSION_FILES) .*.d .*.cmd \
272 $(RM) TRACEEVENT-CFLAGS tags TAGS 272 $(RM) TRACEEVENT-CFLAGS tags TAGS
273 273
274PHONY += force plugins 274PHONY += force plugins
diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST
index 09dc0aabb515..d01a0aad5a01 100644
--- a/tools/perf/MANIFEST
+++ b/tools/perf/MANIFEST
@@ -18,6 +18,7 @@ tools/arch/x86/include/asm/atomic.h
18tools/arch/x86/include/asm/rmwcc.h 18tools/arch/x86/include/asm/rmwcc.h
19tools/lib/traceevent 19tools/lib/traceevent
20tools/lib/api 20tools/lib/api
21tools/lib/hweight.c
21tools/lib/rbtree.c 22tools/lib/rbtree.c
22tools/lib/symbol/kallsyms.c 23tools/lib/symbol/kallsyms.c
23tools/lib/symbol/kallsyms.h 24tools/lib/symbol/kallsyms.h
@@ -57,7 +58,6 @@ include/linux/perf_event.h
57include/linux/list.h 58include/linux/list.h
58include/linux/hash.h 59include/linux/hash.h
59include/linux/stringify.h 60include/linux/stringify.h
60lib/hweight.c
61include/linux/swab.h 61include/linux/swab.h
62arch/*/include/asm/unistd*.h 62arch/*/include/asm/unistd*.h
63arch/*/include/uapi/asm/unistd*.h 63arch/*/include/uapi/asm/unistd*.h
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 7a4b549214e3..bba34636b733 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -109,9 +109,22 @@ $(OUTPUT)PERF-VERSION-FILE: ../../.git/HEAD
109 $(Q)$(SHELL_PATH) util/PERF-VERSION-GEN $(OUTPUT) 109 $(Q)$(SHELL_PATH) util/PERF-VERSION-GEN $(OUTPUT)
110 $(Q)touch $(OUTPUT)PERF-VERSION-FILE 110 $(Q)touch $(OUTPUT)PERF-VERSION-FILE
111 111
112CC = $(CROSS_COMPILE)gcc 112# Makefiles suck: This macro sets a default value of $(2) for the
113LD ?= $(CROSS_COMPILE)ld 113# variable named by $(1), unless the variable has been set by
114AR = $(CROSS_COMPILE)ar 114# environment or command line. This is necessary for CC and AR
115# because make sets default values, so the simpler ?= approach
116# won't work as expected.
117define allow-override
118 $(if $(or $(findstring environment,$(origin $(1))),\
119 $(findstring command line,$(origin $(1)))),,\
120 $(eval $(1) = $(2)))
121endef
122
123# Allow setting CC and AR and LD, or setting CROSS_COMPILE as a prefix.
124$(call allow-override,CC,$(CROSS_COMPILE)gcc)
125$(call allow-override,AR,$(CROSS_COMPILE)ar)
126$(call allow-override,LD,$(CROSS_COMPILE)ld)
127
115PKG_CONFIG = $(CROSS_COMPILE)pkg-config 128PKG_CONFIG = $(CROSS_COMPILE)pkg-config
116 129
117RM = rm -f 130RM = rm -f
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index de165a1b9240..20b56eb987f8 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -521,6 +521,15 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
521 goto out_child; 521 goto out_child;
522 } 522 }
523 523
524 /*
525 * Normally perf_session__new would do this, but it doesn't have the
526 * evlist.
527 */
528 if (rec->tool.ordered_events && !perf_evlist__sample_id_all(rec->evlist)) {
529 pr_warning("WARNING: No sample_id_all support, falling back to unordered processing\n");
530 rec->tool.ordered_events = false;
531 }
532
524 if (!rec->evlist->nr_groups) 533 if (!rec->evlist->nr_groups)
525 perf_header__clear_feat(&session->header, HEADER_GROUP_DESC); 534 perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);
526 535
@@ -965,9 +974,11 @@ static struct record record = {
965 .tool = { 974 .tool = {
966 .sample = process_sample_event, 975 .sample = process_sample_event,
967 .fork = perf_event__process_fork, 976 .fork = perf_event__process_fork,
977 .exit = perf_event__process_exit,
968 .comm = perf_event__process_comm, 978 .comm = perf_event__process_comm,
969 .mmap = perf_event__process_mmap, 979 .mmap = perf_event__process_mmap,
970 .mmap2 = perf_event__process_mmap2, 980 .mmap2 = perf_event__process_mmap2,
981 .ordered_events = true,
971 }, 982 },
972}; 983};
973 984
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 37e301a32f43..d99d850e1444 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -343,7 +343,7 @@ static int read_counter(struct perf_evsel *counter)
343 return 0; 343 return 0;
344} 344}
345 345
346static void read_counters(bool close) 346static void read_counters(bool close_counters)
347{ 347{
348 struct perf_evsel *counter; 348 struct perf_evsel *counter;
349 349
@@ -354,7 +354,7 @@ static void read_counters(bool close)
354 if (process_counter(counter)) 354 if (process_counter(counter))
355 pr_warning("failed to process counter %s\n", counter->name); 355 pr_warning("failed to process counter %s\n", counter->name);
356 356
357 if (close) { 357 if (close_counters) {
358 perf_evsel__close_fd(counter, perf_evsel__nr_cpus(counter), 358 perf_evsel__close_fd(counter, perf_evsel__nr_cpus(counter),
359 thread_map__nr(evsel_list->threads)); 359 thread_map__nr(evsel_list->threads));
360 } 360 }
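Renaming the read_counters() parameter sidesteps shadowing: a parameter called close hides the close(2) declaration from <unistd.h> inside the function body, so the old name made it impossible to refer to the libc function there. A tiny hypothetical illustration, not taken from perf:

#include <stdbool.h>
#include <unistd.h>

static void demo(int fd, bool close)
{
	/* The parameter hides unistd.h's close(); uncommenting the next line
	 * fails to compile because 'close' is no longer a function here. */
	/* close(fd); */
	(void)fd;
	(void)close;
}

int main(void)
{
	demo(0, true);
	return 0;
}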
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index ecf319728f25..6135cc07213c 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -601,8 +601,8 @@ static void display_sig(int sig __maybe_unused)
601 601
602static void display_setup_sig(void) 602static void display_setup_sig(void)
603{ 603{
604 signal(SIGSEGV, display_sig); 604 signal(SIGSEGV, sighandler_dump_stack);
605 signal(SIGFPE, display_sig); 605 signal(SIGFPE, sighandler_dump_stack);
606 signal(SIGINT, display_sig); 606 signal(SIGINT, display_sig);
607 signal(SIGQUIT, display_sig); 607 signal(SIGQUIT, display_sig);
608 signal(SIGTERM, display_sig); 608 signal(SIGTERM, display_sig);
diff --git a/tools/perf/config/Makefile b/tools/perf/config/Makefile
index 094ddaee104c..d31fac19c30b 100644
--- a/tools/perf/config/Makefile
+++ b/tools/perf/config/Makefile
@@ -638,7 +638,7 @@ ifndef DESTDIR
638prefix ?= $(HOME) 638prefix ?= $(HOME)
639endif 639endif
640bindir_relative = bin 640bindir_relative = bin
641bindir = $(prefix)/$(bindir_relative) 641bindir = $(abspath $(prefix)/$(bindir_relative))
642mandir = share/man 642mandir = share/man
643infodir = share/info 643infodir = share/info
644perfexecdir = libexec/perf-core 644perfexecdir = libexec/perf-core
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index 7629bef2fd79..fa67613976a8 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -48,7 +48,7 @@ static struct rb_node *hists__filter_entries(struct rb_node *nd,
48 48
49static bool hist_browser__has_filter(struct hist_browser *hb) 49static bool hist_browser__has_filter(struct hist_browser *hb)
50{ 50{
51 return hists__has_filter(hb->hists) || hb->min_pcnt; 51 return hists__has_filter(hb->hists) || hb->min_pcnt || symbol_conf.has_filter;
52} 52}
53 53
54static int hist_browser__get_folding(struct hist_browser *browser) 54static int hist_browser__get_folding(struct hist_browser *browser)
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index 601d11440596..d2d318c59b37 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -143,6 +143,6 @@ $(OUTPUT)util/rbtree.o: ../lib/rbtree.c FORCE
143 $(call rule_mkdir) 143 $(call rule_mkdir)
144 $(call if_changed_dep,cc_o_c) 144 $(call if_changed_dep,cc_o_c)
145 145
146$(OUTPUT)util/hweight.o: ../../lib/hweight.c FORCE 146$(OUTPUT)util/hweight.o: ../lib/hweight.c FORCE
147 $(call rule_mkdir) 147 $(call rule_mkdir)
148 $(call if_changed_dep,cc_o_c) 148 $(call if_changed_dep,cc_o_c)
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index 7e7405c9b936..83d9dd96fe08 100644
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -53,11 +53,6 @@ int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
53{ 53{
54 struct perf_event_mmap_page *pc = userpg; 54 struct perf_event_mmap_page *pc = userpg;
55 55
56#if BITS_PER_LONG != 64 && !defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
57 pr_err("Cannot use AUX area tracing mmaps\n");
58 return -1;
59#endif
60
61 WARN_ONCE(mm->base, "Uninitialized auxtrace_mmap\n"); 56 WARN_ONCE(mm->base, "Uninitialized auxtrace_mmap\n");
62 57
63 mm->userpg = userpg; 58 mm->userpg = userpg;
@@ -73,6 +68,11 @@ int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
73 return 0; 68 return 0;
74 } 69 }
75 70
71#if BITS_PER_LONG != 64 && !defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
72 pr_err("Cannot use AUX area tracing mmaps\n");
73 return -1;
74#endif
75
76 pc->aux_offset = mp->offset; 76 pc->aux_offset = mp->offset;
77 pc->aux_size = mp->len; 77 pc->aux_size = mp->len;
78 78
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 7ff682770fdb..f1a4c833121e 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -1387,6 +1387,24 @@ int machine__process_fork_event(struct machine *machine, union perf_event *event
1387 event->fork.ptid); 1387 event->fork.ptid);
1388 int err = 0; 1388 int err = 0;
1389 1389
1390 if (dump_trace)
1391 perf_event__fprintf_task(event, stdout);
1392
1393 /*
1394 * There may be an existing thread that is not actually the parent,
1395 * either because we are processing events out of order, or because the
1396 * (fork) event that would have removed the thread was lost. Assume the
1397 * latter case and continue on as best we can.
1398 */
1399 if (parent->pid_ != (pid_t)event->fork.ppid) {
1400 dump_printf("removing erroneous parent thread %d/%d\n",
1401 parent->pid_, parent->tid);
1402 machine__remove_thread(machine, parent);
1403 thread__put(parent);
1404 parent = machine__findnew_thread(machine, event->fork.ppid,
1405 event->fork.ptid);
1406 }
1407
1390 /* if a thread currently exists for the thread id remove it */ 1408 /* if a thread currently exists for the thread id remove it */
1391 if (thread != NULL) { 1409 if (thread != NULL) {
1392 machine__remove_thread(machine, thread); 1410 machine__remove_thread(machine, thread);
@@ -1395,8 +1413,6 @@ int machine__process_fork_event(struct machine *machine, union perf_event *event
1395 1413
1396 thread = machine__findnew_thread(machine, event->fork.pid, 1414 thread = machine__findnew_thread(machine, event->fork.pid,
1397 event->fork.tid); 1415 event->fork.tid);
1398 if (dump_trace)
1399 perf_event__fprintf_task(event, stdout);
1400 1416
1401 if (thread == NULL || parent == NULL || 1417 if (thread == NULL || parent == NULL ||
1402 thread__fork(thread, parent, sample->time) < 0) { 1418 thread__fork(thread, parent, sample->time) < 0) {
diff --git a/tools/perf/util/python-ext-sources b/tools/perf/util/python-ext-sources
index e23ded40c79e..0766d98c5da5 100644
--- a/tools/perf/util/python-ext-sources
+++ b/tools/perf/util/python-ext-sources
@@ -10,7 +10,7 @@ util/ctype.c
10util/evlist.c 10util/evlist.c
11util/evsel.c 11util/evsel.c
12util/cpumap.c 12util/cpumap.c
13../../lib/hweight.c 13../lib/hweight.c
14util/thread_map.c 14util/thread_map.c
15util/util.c 15util/util.c
16util/xyarray.c 16util/xyarray.c
@@ -19,5 +19,5 @@ util/rblist.c
19util/stat.c 19util/stat.c
20util/strlist.c 20util/strlist.c
21util/trace-event.c 21util/trace-event.c
22../../lib/rbtree.c 22../lib/rbtree.c
23util/string.c 23util/string.c
diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
index 53e8bb7bc852..2a5d8d7698ae 100644
--- a/tools/perf/util/stat-shadow.c
+++ b/tools/perf/util/stat-shadow.c
@@ -85,7 +85,7 @@ void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 *count,
85 else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES)) 85 else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
86 update_stats(&runtime_cycles_stats[ctx][cpu], count[0]); 86 update_stats(&runtime_cycles_stats[ctx][cpu], count[0]);
87 else if (perf_stat_evsel__is(counter, CYCLES_IN_TX)) 87 else if (perf_stat_evsel__is(counter, CYCLES_IN_TX))
88 update_stats(&runtime_transaction_stats[ctx][cpu], count[0]); 88 update_stats(&runtime_cycles_in_tx_stats[ctx][cpu], count[0]);
89 else if (perf_stat_evsel__is(counter, TRANSACTION_START)) 89 else if (perf_stat_evsel__is(counter, TRANSACTION_START))
90 update_stats(&runtime_transaction_stats[ctx][cpu], count[0]); 90 update_stats(&runtime_transaction_stats[ctx][cpu], count[0]);
91 else if (perf_stat_evsel__is(counter, ELISION_START)) 91 else if (perf_stat_evsel__is(counter, ELISION_START))
@@ -398,20 +398,18 @@ void perf_stat__print_shadow_stats(FILE *out, struct perf_evsel *evsel,
398 " # %5.2f%% aborted cycles ", 398 " # %5.2f%% aborted cycles ",
399 100.0 * ((total2-avg) / total)); 399 100.0 * ((total2-avg) / total));
400 } else if (perf_stat_evsel__is(evsel, TRANSACTION_START) && 400 } else if (perf_stat_evsel__is(evsel, TRANSACTION_START) &&
401 avg > 0 &&
402 runtime_cycles_in_tx_stats[ctx][cpu].n != 0) { 401 runtime_cycles_in_tx_stats[ctx][cpu].n != 0) {
403 total = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]); 402 total = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]);
404 403
405 if (total) 404 if (avg)
406 ratio = total / avg; 405 ratio = total / avg;
407 406
408 fprintf(out, " # %8.0f cycles / transaction ", ratio); 407 fprintf(out, " # %8.0f cycles / transaction ", ratio);
409 } else if (perf_stat_evsel__is(evsel, ELISION_START) && 408 } else if (perf_stat_evsel__is(evsel, ELISION_START) &&
410 avg > 0 &&
411 runtime_cycles_in_tx_stats[ctx][cpu].n != 0) { 409 runtime_cycles_in_tx_stats[ctx][cpu].n != 0) {
412 total = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]); 410 total = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]);
413 411
414 if (total) 412 if (avg)
415 ratio = total / avg; 413 ratio = total / avg;
416 414
417 fprintf(out, " # %8.0f cycles / elision ", ratio); 415 fprintf(out, " # %8.0f cycles / elision ", ratio);
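Both transaction branches compute total / avg, so the zero test now sits on avg, the divisor, rather than on total, and the avg > 0 requirement is dropped from the branch condition so the line is still printed (with a zero ratio) when no transactions were started. A short user-space sketch of guarding the divisor, with illustrative numbers only:

#include <stdio.h>

int main(void)
{
	double total = 240000.0;   /* e.g. cycles spent in transactions */
	double avg = 0.0;          /* e.g. number of transaction starts */
	double ratio = 0.0;

	/* Guard the divisor: with avg == 0 the division is skipped and
	 * the printed ratio stays at a harmless 0. */
	if (avg)
		ratio = total / avg;

	printf(" # %8.0f cycles / transaction\n", ratio);
	return 0;
}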
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 48b588c6951a..60f11414bb5c 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -1911,6 +1911,8 @@ int setup_list(struct strlist **list, const char *list_str,
1911 pr_err("problems parsing %s list\n", list_name); 1911 pr_err("problems parsing %s list\n", list_name);
1912 return -1; 1912 return -1;
1913 } 1913 }
1914
1915 symbol_conf.has_filter = true;
1914 return 0; 1916 return 0;
1915} 1917}
1916 1918
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index bef47ead1d9b..b98ce51af142 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -105,7 +105,8 @@ struct symbol_conf {
105 demangle_kernel, 105 demangle_kernel,
106 filter_relative, 106 filter_relative,
107 show_hist_headers, 107 show_hist_headers,
108 branch_callstack; 108 branch_callstack,
109 has_filter;
109 const char *vmlinux_name, 110 const char *vmlinux_name,
110 *kallsyms_name, 111 *kallsyms_name,
111 *source_prefix, 112 *source_prefix,
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index 28c4b746baa1..0a9ae8014729 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -191,6 +191,12 @@ static int thread__clone_map_groups(struct thread *thread,
191 if (thread->pid_ == parent->pid_) 191 if (thread->pid_ == parent->pid_)
192 return 0; 192 return 0;
193 193
194 if (thread->mg == parent->mg) {
195 pr_debug("broken map groups on thread %d/%d parent %d/%d\n",
196 thread->pid_, thread->tid, parent->pid_, parent->tid);
197 return 0;
198 }
199
194 /* But this one is new process, copy maps. */ 200 /* But this one is new process, copy maps. */
195 for (i = 0; i < MAP__NR_TYPES; ++i) 201 for (i = 0; i < MAP__NR_TYPES; ++i)
196 if (map_groups__clone(thread->mg, parent->mg, i) < 0) 202 if (map_groups__clone(thread->mg, parent->mg, i) < 0)
diff --git a/tools/perf/util/thread_map.c b/tools/perf/util/thread_map.c
index da7646d767fe..292ae2c90e06 100644
--- a/tools/perf/util/thread_map.c
+++ b/tools/perf/util/thread_map.c
@@ -136,8 +136,7 @@ struct thread_map *thread_map__new_by_uid(uid_t uid)
136 if (grow) { 136 if (grow) {
137 struct thread_map *tmp; 137 struct thread_map *tmp;
138 138
139 tmp = realloc(threads, (sizeof(*threads) + 139 tmp = thread_map__realloc(threads, max_threads);
140 max_threads * sizeof(pid_t)));
141 if (tmp == NULL) 140 if (tmp == NULL)
142 goto out_free_namelist; 141 goto out_free_namelist;
143 142
diff --git a/tools/perf/util/vdso.c b/tools/perf/util/vdso.c
index 4b89118f158d..44d440da15dc 100644
--- a/tools/perf/util/vdso.c
+++ b/tools/perf/util/vdso.c
@@ -236,18 +236,16 @@ static struct dso *__machine__findnew_compat(struct machine *machine,
236 const char *file_name; 236 const char *file_name;
237 struct dso *dso; 237 struct dso *dso;
238 238
239 pthread_rwlock_wrlock(&machine->dsos.lock);
240 dso = __dsos__find(&machine->dsos, vdso_file->dso_name, true); 239 dso = __dsos__find(&machine->dsos, vdso_file->dso_name, true);
241 if (dso) 240 if (dso)
242 goto out_unlock; 241 goto out;
243 242
244 file_name = vdso__get_compat_file(vdso_file); 243 file_name = vdso__get_compat_file(vdso_file);
245 if (!file_name) 244 if (!file_name)
246 goto out_unlock; 245 goto out;
247 246
248 dso = __machine__addnew_vdso(machine, vdso_file->dso_name, file_name); 247 dso = __machine__addnew_vdso(machine, vdso_file->dso_name, file_name);
249out_unlock: 248out:
250 pthread_rwlock_unlock(&machine->dsos.lock);
251 return dso; 249 return dso;
252} 250}
253 251
diff --git a/tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c b/tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c
index 7f0c756993af..3d7dc6afc3f8 100644
--- a/tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c
+++ b/tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c
@@ -191,7 +191,7 @@ int main(int argc, char *argv[])
191 if (res > 0) { 191 if (res > 0) {
192 atomic_set(&requeued, 1); 192 atomic_set(&requeued, 1);
193 break; 193 break;
194 } else if (res > 0) { 194 } else if (res < 0) {
195 error("FUTEX_CMP_REQUEUE_PI failed\n", errno); 195 error("FUTEX_CMP_REQUEUE_PI failed\n", errno);
196 ret = RET_ERROR; 196 ret = RET_ERROR;
197 break; 197 break;
diff --git a/virt/kvm/vfio.c b/virt/kvm/vfio.c
index 620e37f741b8..1dd087da6f31 100644
--- a/virt/kvm/vfio.c
+++ b/virt/kvm/vfio.c
@@ -155,6 +155,8 @@ static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg)
155 list_add_tail(&kvg->node, &kv->group_list); 155 list_add_tail(&kvg->node, &kv->group_list);
156 kvg->vfio_group = vfio_group; 156 kvg->vfio_group = vfio_group;
157 157
158 kvm_arch_start_assignment(dev->kvm);
159
158 mutex_unlock(&kv->lock); 160 mutex_unlock(&kv->lock);
159 161
160 kvm_vfio_update_coherency(dev); 162 kvm_vfio_update_coherency(dev);
@@ -190,6 +192,8 @@ static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg)
190 break; 192 break;
191 } 193 }
192 194
195 kvm_arch_end_assignment(dev->kvm);
196
193 mutex_unlock(&kv->lock); 197 mutex_unlock(&kv->lock);
194 198
195 kvm_vfio_group_put_external_user(vfio_group); 199 kvm_vfio_group_put_external_user(vfio_group);
@@ -239,6 +243,7 @@ static void kvm_vfio_destroy(struct kvm_device *dev)
239 kvm_vfio_group_put_external_user(kvg->vfio_group); 243 kvm_vfio_group_put_external_user(kvg->vfio_group);
240 list_del(&kvg->node); 244 list_del(&kvg->node);
241 kfree(kvg); 245 kfree(kvg);
246 kvm_arch_end_assignment(dev->kvm);
242 } 247 }
243 248
244 kvm_vfio_update_coherency(dev); 249 kvm_vfio_update_coherency(dev);